diff --git a/.github/workflows/code-test-coverage.yml b/.github/workflows/code-test-coverage.yml deleted file mode 100644 index 65c0a92f1f..0000000000 --- a/.github/workflows/code-test-coverage.yml +++ /dev/null @@ -1,76 +0,0 @@ -# Copyright 2022 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -name: Code Test Coverage Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - code-test-coverage: - name: Code test coverage job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Generate full test coverage report using go-acc - run: make test:coverage - - - name: Upload coverage to Codecov without token, retry on failure - env: - codecov_secret: ${{ secrets.CODECOV_TOKEN }} - if: env.codecov_secret == '' - uses: Wandalen/wretry.action@v1.0.36 - with: - attempt_limit: 5 - attempt_delay: 10000 - action: codecov/codecov-action@v3 - with: | - name: defradb-codecov - files: ./coverage.txt - flags: all-tests - os: 'linux' - fail_ci_if_error: true - verbose: true - - - name: Upload coverage to Codecov with token - env: - codecov_secret: ${{ secrets.CODECOV_TOKEN }} - if: env.codecov_secret != '' - uses: codecov/codecov-action@v3 - with: - token: ${{ env.codecov_secret }} - name: defradb-codecov - files: ./coverage.txt - flags: all-tests - os: 'linux' - fail_ci_if_error: true - verbose: true - # path_to_write_report: ./coverage/codecov_report.txt - # directory: ./coverage/reports/ diff --git a/.github/workflows/detect-change.yml b/.github/workflows/detect-change.yml deleted file mode 100644 index b6272c21cd..0000000000 --- a/.github/workflows/detect-change.yml +++ /dev/null @@ -1,55 +0,0 @@ -# Copyright 2022 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -name: Detect Change Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - detect-change: - name: Detect change job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Build dependencies - run: | - make deps:modules - make deps:test - - - name: Run detection for changes - run: make test:changes - - ## Uncomment to enable ability to SSH into the runner. 
- #- name: Setup upterm ssh session for debugging - # uses: lhotari/action-upterm@v1 - # with: - # limit-access-to-actor: true - # limit-access-to-users: shahzadlone diff --git a/.github/workflows/pull-docker-image.yml b/.github/workflows/pull-docker-image.yml deleted file mode 100644 index eb0170b7ef..0000000000 --- a/.github/workflows/pull-docker-image.yml +++ /dev/null @@ -1,51 +0,0 @@ -# Copyright 2023 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -# This workflow validates that the images pushed to the container -# registries can be pulled then run sucessfully. -name: Pull Docker Image Workflow - -on: - workflow_run: - # Warning: this workflow must NOT: - # - interact with any new code. - # - checkout new code. - # - build/compile anything (only pull). - # - make any indirect calls (i.e. make xyz, or npm install, etc.) - # Note this workflow: - # - will use the base's (or default) workflow file's state. - # - doesn't run on the PR or the branch coming in, it runs on the default branch. - # - has read-write repo token - # - has access to secrets - workflows: ["Push Docker Image To Registries Workflow"] - types: - - completed - -jobs: - pull-docker-image: - name: Pull docker image job - - if: ${{ github.event.workflow_run.conclusion == 'success' }} - - runs-on: ubuntu-latest - - strategy: - fail-fast: false - matrix: - image_tag: - - sourcenetwork/defradb:latest - - ghcr.io/sourcenetwork/defradb:latest - - steps: - - name: Pull Docker image - run: docker pull ${{ matrix.image_tag }} - - - name: Test Docker image - run: docker run --rm ${{ matrix.image_tag }} diff --git a/.github/workflows/push-docker-image-to-registries.yml b/.github/workflows/push-docker-image-to-registries.yml deleted file mode 100644 index 47c4e98046..0000000000 --- a/.github/workflows/push-docker-image-to-registries.yml +++ /dev/null @@ -1,83 +0,0 @@ -# Copyright 2023 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -# This workflow builds a Docker container image, if the build is successful -# then it will deploy the image to DockerHub & GitHub container registries. -name: Push Docker Image To Registries Workflow - -on: - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - -env: - TEST_TAG: sourcenetwork/defradb:test - -jobs: - push-docker-image-to-registries: - name: Push Docker image to registries job - - runs-on: ubuntu-latest - - permissions: - packages: write - contents: read - - steps: - - name: Check out the repo - uses: actions/checkout@v3 - - - name: Set up QEMU - uses: docker/setup-qemu-action@v2 - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v2 - - - name: Build Docker image - uses: docker/build-push-action@v4 - with: - context: . 
- file: tools/defradb.containerfile - load: true - tags: ${{ env.TEST_TAG }} - labels: ${{ steps.meta.outputs.labels }} - - - name: Test Docker image - run: docker run --rm ${{ env.TEST_TAG }} - - - name: Log in to Docker Hub - uses: docker/login-action@v2 - with: - username: ${{ secrets.DOCKER_USERNAME }} - password: ${{ secrets.DOCKER_PASSWORD }} - - - name: Log in to the Container registry - uses: docker/login-action@v2 - with: - registry: ghcr.io - username: ${{ github.actor }} - password: ${{ secrets.GITHUB_TOKEN }} - - - name: Extract metadata (tags, labels) for Docker - id: meta - uses: docker/metadata-action@v4 - with: - images: | - sourcenetwork/defradb - ghcr.io/${{ github.repository }} - - - name: Push Docker images - uses: docker/build-push-action@v4 - with: - context: . - file: tools/defradb.containerfile - push: true - tags: ${{ steps.meta.outputs.tags }} - labels: ${{ steps.meta.outputs.labels }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..7d226a19fc --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,71 @@ +# Copyright 2023 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +name: Release workflow + +on: + workflow_dispatch: + inputs: + tag: + description: 'New tag name' + required: true + +permissions: + contents: write + packages: write + issues: write + +jobs: + goreleaser: + runs-on: ubuntu-latest + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.20" + check-latest: true + + - name: Apply tag + run: git tag ${{ github.event.inputs.tag }} + + - name: Build modules + run: make deps:modules + + - name: Set up QEMU + uses: docker/setup-qemu-action@v2 + + - name: Log in to Docker Hub + uses: docker/login-action@v2 + with: + username: ${{ secrets.DOCKER_USERNAME }} + password: ${{ secrets.DOCKER_PASSWORD }} + + - name: Log in to the Container registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Run GoReleaser + uses: goreleaser/goreleaser-action@v5 + with: + distribution: goreleaser-pro + version: latest + args: release --clean + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GITHUB_REPOSITORY: ${{ github.repository }} + GORELEASER_KEY: ${{ secrets.GORELEASER_KEY }} \ No newline at end of file diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml deleted file mode 100644 index bfa696a283..0000000000 --- a/.github/workflows/run-tests.yml +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright 2022 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. 
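The `release.yml` workflow added above is driven by a manual `workflow_dispatch` with a required `tag` input. As a sketch, assuming the GitHub CLI is installed and authenticated (the tag value is illustrative), a release can be started with:

```shell
# Dispatch the release workflow on the default branch, passing the required 'tag' input.
gh workflow run release.yml -f tag=v0.8.0
```

The job then applies the tag, builds with GoReleaser Pro, and pushes the Docker images as configured in `.goreleaser.yaml`.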
- -name: Run Tests Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - -jobs: - run-tests: - name: Run tests job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Build dependencies - run: | - make deps:modules - make deps:test - - - name: Build binary - run: make build - - # This is to ensure tests pass with a running server. - - name: Start server from binary - run: ./build/defradb start & - - - name: Run the tests, showing name of each test - run: make test:ci diff --git a/.github/workflows/test-and-upload-coverage.yml b/.github/workflows/test-and-upload-coverage.yml new file mode 100644 index 0000000000..cc3aa84a7e --- /dev/null +++ b/.github/workflows/test-and-upload-coverage.yml @@ -0,0 +1,115 @@ +# Copyright 2022 Democratized Data Foundation +# +# Use of this software is governed by the Business Source License +# included in the file licenses/BSL.txt. +# +# As of the Change Date specified in that file, in accordance with +# the Business Source License, use of this software will be governed +# by the Apache License, Version 2.0, included in the file +# licenses/APL.txt. + +name: Test And Upload Coverage Workflow + +on: + pull_request: + branches: + - master + - develop + + push: + tags: + - 'v[0-9]+.[0-9]+.[0-9]+' + branches: + - master + - develop + +jobs: + run-tests: + name: Run tests matrix job + + strategy: + matrix: + os: [ubuntu-latest] + client-type: [go, http, cli] + database-type: [badger-file, badger-memory] + mutation-type: [gql, collection-named, collection-save] + detect-changes: [false] + include: + - os: ubuntu-latest + client-type: go + database-type: badger-memory + mutation-type: collection-save + detect-changes: true + - os: macos-latest + client-type: go + database-type: badger-memory + mutation-type: collection-save + detect-changes: false + + runs-on: ${{ matrix.os }} + + env: + DEFRA_CLIENT_GO: ${{ matrix.client-type == 'go' }} + DEFRA_CLIENT_HTTP: ${{ matrix.client-type == 'http' }} + DEFRA_CLIENT_CLI: ${{ matrix.client-type == 'cli' }} + DEFRA_BADGER_MEMORY: ${{ matrix.database-type == 'badger-memory' }} + DEFRA_BADGER_FILE: ${{ matrix.database-type == 'badger-file' }} + DEFRA_MUTATION_TYPE: ${{ matrix.mutation-type }} + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Setup Go environment explicitly + uses: actions/setup-go@v3 + with: + go-version: "1.20" + check-latest: true + + - name: Build dependencies + run: | + make deps:modules + make deps:test + + - name: Run integration tests + if: ${{ !matrix.detect-changes }} + run: make test:coverage + + - name: Run change detector tests + if: ${{ matrix.detect-changes }} + run: make test:changes + + - name: Upload coverage artifact + if: ${{ !matrix.detect-changes }} + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.client-type }}_${{ matrix.database-type }}_${{ matrix.mutation-type }} + path: coverage.txt + if-no-files-found: error + retention-days: 1 + + upload-coverage: + name: Upload test code coverage job + + runs-on: ubuntu-latest + + needs: run-tests + + steps: + - name: Checkout code into the directory + uses: actions/checkout@v3 + + - name: Download coverage reports + uses: actions/download-artifact@v3 + with: + path: coverage_reports + + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v3 + with: + token: ${{ 
secrets.CODECOV_TOKEN }} + name: defradb-codecov + flags: all-tests + os: 'linux' + fail_ci_if_error: true + verbose: true diff --git a/.github/workflows/test-collection-named.yml b/.github/workflows/test-collection-named.yml deleted file mode 100644 index 5adabe4fdf..0000000000 --- a/.github/workflows/test-collection-named.yml +++ /dev/null @@ -1,54 +0,0 @@ -# Copyright 2023 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. - -name: Run Collection Named Mutations Tests Workflow - -# This workflow runs the test suite with any supporting mutation test actions -# running their mutations via their corresponding named [Collection] call. -# -# For example, CreateDoc will call [Collection.Create], and -# UpdateDoc will call [Collection.Update]. - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - test-collection-named-mutations: - name: Test Collection Named Mutations job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Build dependencies - run: | - make deps:modules - make deps:test - - - name: Run tests with Collection Named mutations - run: make test:ci-col-named-mutations diff --git a/.github/workflows/test-gql-mutations.yml b/.github/workflows/test-gql-mutations.yml deleted file mode 100644 index 827dd22098..0000000000 --- a/.github/workflows/test-gql-mutations.yml +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2022 Democratized Data Foundation -# -# Use of this software is governed by the Business Source License -# included in the file licenses/BSL.txt. -# -# As of the Change Date specified in that file, in accordance with -# the Business Source License, use of this software will be governed -# by the Apache License, Version 2.0, included in the file -# licenses/APL.txt. 
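The `test-and-upload-coverage.yml` matrix above selects the client, database, and mutation behavior purely through `DEFRA_*` environment variables, so any single matrix cell can be reproduced locally. A minimal sketch, assuming the Makefile targets introduced in this change (the chosen cell is just an example):

```shell
# Reproduce the http / badger-file / gql matrix cell locally.
DEFRA_CLIENT_HTTP=true DEFRA_BADGER_FILE=true DEFRA_MUTATION_TYPE=gql \
  make test:coverage
```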
- -name: Run GQL Mutations Tests Workflow - -on: - pull_request: - branches: - - master - - develop - - push: - tags: - - 'v[0-9]+.[0-9]+.[0-9]+' - branches: - - master - - develop - -jobs: - test-gql-mutations: - name: Test GQL mutations job - - runs-on: ubuntu-latest - - steps: - - name: Checkout code into the directory - uses: actions/checkout@v3 - - - name: Setup Go environment explicitly - uses: actions/setup-go@v3 - with: - go-version: "1.20" - check-latest: true - - - name: Build dependencies - run: | - make deps:modules - make deps:test - - - name: Run tests with gql mutations - run: make test:ci-gql-mutations diff --git a/.gitignore b/.gitignore index b19a6d9259..81c1a16d62 100644 --- a/.gitignore +++ b/.gitignore @@ -4,6 +4,7 @@ cmd/defradb/defradb cmd/genclidocs/genclidocs cmd/genmanpages/genmanpages coverage.txt +coverage tests/bench/*.log tests/bench/*.svg diff --git a/.goreleaser.yaml b/.goreleaser.yaml new file mode 100644 index 0000000000..4304075c08 --- /dev/null +++ b/.goreleaser.yaml @@ -0,0 +1,109 @@ +version: 1 + +dist: ./build + +before: + hooks: + - go mod tidy + - make deps:playground + +after: + hooks: + - cmd: docker pull {{ .Env.GITHUB_REPOSITORY }}:latest + - cmd: docker run --rm {{ .Env.GITHUB_REPOSITORY }}:latest + +builds: + - id: "defradb" + main: ./cmd/defradb + goos: + - linux + - windows + - darwin + goarch: + - amd64 + - arm64 + # A build with the playground included. + - id: "defradb_playground" + main: ./cmd/defradb + flags: + - -tags=playground + goos: + - linux + - windows + - darwin + goarch: + - amd64 + - arm64 + +archives: + - id: defradb_playground + builds: + - defradb_playground + format: binary + # this name template makes the OS and Arch compatible with the results of `uname`. + name_template: '{{ .Binary }}_playground_{{ .Version }}_{{ .Os }}_{{- if eq .Arch "amd64" }}x86_64{{- else }}{{ .Arch }}{{ end }}{{- if .Arm }}v{{ .Arm }}{{ end }}' + - id: defradb + builds: + - defradb + format: binary + # this name template makes the OS and Arch compatible with the results of `uname`. + name_template: '{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{- if eq .Arch "amd64" }}x86_64{{- else }}{{ .Arch }}{{ end }}{{- if .Arm }}v{{ .Arm }}{{ end }}' + +release: + target_commitish: '{{ .Commit }}' + header: | + DefraDB v{{ .Major }}.{{ .Minor }} is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + + To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v{{ .Major }}.{{ .Minor }}.x databases. If you need help migrating an existing deployment, reach out at hello@source.network or join our Discord at https://discord.source.network/. 
+ name_template: "v{{ .Version }} Release" + +changelog: + sort: asc + abbrev: -1 + groups: + - title: Features + regexp: '^feat:.*' + order: 0 + - title: Fix + regexp: '^fix:.*' + order: 1 + - title: Tooling + regexp: '^tools:.*' + order: 2 + - title: Documentation + regexp: '^docs:.*' + order: 3 + - title: Refactoring + regexp: '^refactor:.*' + order: 4 + - title: Testing + regexp: '^test:.*' + order: 5 + +source: + enabled: true + +milestones: + - close: true + fail_on_error: false + name_template: "DefraDB v{{ .Major }}.{{ .Minor }}" + +dockers: +- ids: + - "defradb_playground" + image_templates: + - "{{ .Env.GITHUB_REPOSITORY }}:latest" + - "{{ .Env.GITHUB_REPOSITORY }}:{{ .Version }}" + - "ghcr.io/{{ .Env.GITHUB_REPOSITORY }}:{{ .Version }}" + use: buildx + build_flag_templates: + - "--pull" + - "--label=org.opencontainers.image.description=DefraDB is a Peer-to-Peer Edge Database." + - "--label=org.opencontainers.image.created={{ .Date }}" + - "--label=org.opencontainers.image.name={{ .ProjectName }}" + - "--label=org.opencontainers.image.revision={{ .FullCommit }}" + - "--label=org.opencontainers.image.version={{ .Version }}" + - "--label=org.opencontainers.image.source={{ .GitURL }}" + - "--platform=linux/amd64" + dockerfile: ./tools/goreleaser.containerfile + diff --git a/CHANGELOG.md b/CHANGELOG.md index 92e4e0cb4b..b42673927b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,77 @@ + +## [v0.8.0](https://github.com/sourcenetwork/defradb/compare/v0.7.0...v0.8.0) + +> 2023-11-14 + +DefraDB v0.8 is a major pre-production release. Until the stable version 1.0 is reached, the SemVer minor patch number will denote notable releases, which will give the project freedom to experiment and explore potentially breaking changes. + +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.7.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/. 
+ +### Features + +* Add means to fetch schema ([#2006](https://github.com/sourcenetwork/defradb/issues/2006)) +* Rename Schema.SchemaID to Schema.Root ([#2005](https://github.com/sourcenetwork/defradb/issues/2005)) +* Enable playground in Docker build ([#1986](https://github.com/sourcenetwork/defradb/issues/1986)) +* Change GetCollectionBySchemaFoo funcs to return many ([#1984](https://github.com/sourcenetwork/defradb/issues/1984)) +* Add Swagger UI to playground ([#1979](https://github.com/sourcenetwork/defradb/issues/1979)) +* Add OpenAPI route ([#1960](https://github.com/sourcenetwork/defradb/issues/1960)) +* Remove CollectionDescription.Schema ([#1965](https://github.com/sourcenetwork/defradb/issues/1965)) +* Remove collection from patch schema ([#1957](https://github.com/sourcenetwork/defradb/issues/1957)) +* Make queries utilise secondary indexes ([#1925](https://github.com/sourcenetwork/defradb/issues/1925)) +* Allow setting of default schema version ([#1888](https://github.com/sourcenetwork/defradb/issues/1888)) +* Add CCIP Support ([#1896](https://github.com/sourcenetwork/defradb/issues/1896)) + +### Fixes + +* Fix test module relying on closed memory leak ([#2037](https://github.com/sourcenetwork/defradb/issues/2037)) +* Make return type for FieldKind_INT an int64 ([#1982](https://github.com/sourcenetwork/defradb/issues/1982)) +* Node private key requires data directory ([#1938](https://github.com/sourcenetwork/defradb/issues/1938)) +* Remove collection name from schema ID generation ([#1920](https://github.com/sourcenetwork/defradb/issues/1920)) +* Infinite loop when updating one-one relation ([#1915](https://github.com/sourcenetwork/defradb/issues/1915)) + +### Refactoring + +* CRDT merge direction ([#2016](https://github.com/sourcenetwork/defradb/issues/2016)) +* Reorganise collection description storage ([#1988](https://github.com/sourcenetwork/defradb/issues/1988)) +* Add peerstore to multistore ([#1980](https://github.com/sourcenetwork/defradb/issues/1980)) +* P2P client interface ([#1924](https://github.com/sourcenetwork/defradb/issues/1924)) +* Deprecate CollectionDescription.Schema ([#1939](https://github.com/sourcenetwork/defradb/issues/1939)) +* Remove net GRPC API ([#1927](https://github.com/sourcenetwork/defradb/issues/1927)) +* CLI client interface ([#1839](https://github.com/sourcenetwork/defradb/issues/1839)) + +### Continuous integration + +* Add goreleaser workflow ([#2040](https://github.com/sourcenetwork/defradb/issues/2040)) +* Add mac test runner ([#2035](https://github.com/sourcenetwork/defradb/issues/2035)) +* Parallelize change detector ([#1871](https://github.com/sourcenetwork/defradb/issues/1871)) + +### Chore + +* Update dependencies ([#2044](https://github.com/sourcenetwork/defradb/issues/2044)) + +### Bot + +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.10.0 to 6.11.0 in /playground ([#2053](https://github.com/sourcenetwork/defradb/issues/2053)) +* Update dependencies (bulk dependabot PRs) 13-11-2023 ([#2052](https://github.com/sourcenetwork/defradb/issues/2052)) +* Bump axios from 1.5.1 to 1.6.1 in /playground ([#2041](https://github.com/sourcenetwork/defradb/issues/2041)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.9.1 to 6.10.0 in /playground ([#2042](https://github.com/sourcenetwork/defradb/issues/2042)) +* Bump [@vitejs](https://github.com/vitejs)/plugin-react-swc from 3.4.0 to 3.4.1 in /playground ([#2022](https://github.com/sourcenetwork/defradb/issues/2022)) +* Update 
dependencies (bulk dependabot PRs) 08-11-2023 ([#2038](https://github.com/sourcenetwork/defradb/issues/2038)) +* Update dependencies (bulk dependabot PRs) 30-10-2023 ([#2015](https://github.com/sourcenetwork/defradb/issues/2015)) +* Bump eslint-plugin and parser from 6.8.0 to 6.9.0 in /playground ([#2000](https://github.com/sourcenetwork/defradb/issues/2000)) +* Update dependencies (bulk dependabot PRs) 16-10-2023 ([#1998](https://github.com/sourcenetwork/defradb/issues/1998)) +* Update dependencies (bulk dependabot PRs) 16-10-2023 ([#1976](https://github.com/sourcenetwork/defradb/issues/1976)) +* Bump golang.org/x/net from 0.16.0 to 0.17.0 ([#1961](https://github.com/sourcenetwork/defradb/issues/1961)) +* Bump [@types](https://github.com/types)/react-dom from 18.2.11 to 18.2.12 in /playground ([#1952](https://github.com/sourcenetwork/defradb/issues/1952)) +* Bump [@typescript](https://github.com/typescript)-eslint/eslint-plugin from 6.7.4 to 6.7.5 in /playground ([#1953](https://github.com/sourcenetwork/defradb/issues/1953)) +* Bump combined dependencies 09-10-2023 ([#1951](https://github.com/sourcenetwork/defradb/issues/1951)) +* Bump [@types](https://github.com/types)/react from 18.2.24 to 18.2.25 in /playground ([#1932](https://github.com/sourcenetwork/defradb/issues/1932)) +* Bump [@typescript](https://github.com/typescript)-eslint/parser from 6.7.3 to 6.7.4 in /playground ([#1933](https://github.com/sourcenetwork/defradb/issues/1933)) +* Bump [@vitejs](https://github.com/vitejs)/plugin-react-swc from 3.3.2 to 3.4.0 in /playground ([#1904](https://github.com/sourcenetwork/defradb/issues/1904)) +* Bump combined dependencies 19-09-2023 ([#1931](https://github.com/sourcenetwork/defradb/issues/1931)) +* Bump graphql from 16.8.0 to 16.8.1 in /playground ([#1901](https://github.com/sourcenetwork/defradb/issues/1901)) +* Update combined dependabot PRs 19-09-2023 ([#1898](https://github.com/sourcenetwork/defradb/issues/1898)) + ## [v0.7.0](https://github.com/sourcenetwork/defradb/compare/v0.6.0...v0.7.0) @@ -7,7 +81,7 @@ DefraDB v0.7 is a major pre-production release. Until the stable version 1.0 is This release has focused on robustness, testing, and schema management. Some highlight new features include notable expansions to the expressiveness of schema migrations. -To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.5.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/. +To get a full outline of the changes, we invite you to review the official changelog below. This release does include a Breaking Change to existing v0.6.x databases. If you need help migrating an existing deployment, reach out at [hello@source.network](mailto:hello@source.network) or join our Discord at https://discord.source.network/. ### Features diff --git a/Makefile b/Makefile index 21fcfcedf1..5dddc2872e 100644 --- a/Makefile +++ b/Makefile @@ -29,12 +29,16 @@ ifdef BUILD_TAGS BUILD_FLAGS+=-tags $(BUILD_TAGS) endif -TEST_FLAGS=-race -shuffle=on -timeout 300s +TEST_FLAGS=-race -shuffle=on -timeout 5m + +COVERAGE_DIRECTORY=$(PWD)/coverage +COVERAGE_FILE=coverage.txt +COVERAGE_FLAGS=-covermode=atomic -coverpkg=./... 
-args -test.gocoverdir=$(COVERAGE_DIRECTORY) PLAYGROUND_DIRECTORY=playground LENS_TEST_DIRECTORY=tests/integration/schema/migrations -CLI_TEST_DIRECTORY=tests/integration/cli -DEFAULT_TEST_DIRECTORIES=$$(go list ./... | grep -v -e $(LENS_TEST_DIRECTORY) -e $(CLI_TEST_DIRECTORY)) +CHANGE_DETECTOR_TEST_DIRECTORY=tests/change_detector +DEFAULT_TEST_DIRECTORIES=$$(go list ./... | grep -v -e $(LENS_TEST_DIRECTORY)) default: @go run $(BUILD_FLAGS) cmd/defradb/main.go @@ -87,11 +91,6 @@ deps\:lens: rustup target add wasm32-unknown-unknown @$(MAKE) -C ./tests/lenses build -.PHONY: deps\:coverage -deps\:coverage: - go install github.com/ory/go-acc@latest - @$(MAKE) deps:lens - .PHONY: deps\:bench deps\:bench: go install golang.org/x/perf/cmd/benchstat@latest @@ -117,7 +116,6 @@ deps: @$(MAKE) deps:modules && \ $(MAKE) deps:bench && \ $(MAKE) deps:chglog && \ - $(MAKE) deps:coverage && \ $(MAKE) deps:lint && \ $(MAKE) deps:test && \ $(MAKE) deps:mock @@ -160,6 +158,11 @@ clean: clean\:test: go clean -testcache +.PHONY: clean\:coverage +clean\:coverage: + rm -rf $(COVERAGE_DIRECTORY) + rm -f $(COVERAGE_FILE) + # Example: `make tls-certs path="~/.defradb/certs"` .PHONY: tls-certs tls-certs: @@ -185,18 +188,6 @@ test\:quick: test\:build: gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) -run=nope -.PHONY: test\:ci -test\:ci: - DEFRA_BADGER_MEMORY=true DEFRA_BADGER_FILE=true \ - DEFRA_CLIENT_GO=true DEFRA_CLIENT_HTTP=true \ - $(MAKE) test:all - -.PHONY: test\:ci-gql-mutations -test\:ci-gql-mutations: - DEFRA_MUTATION_TYPE=gql DEFRA_BADGER_MEMORY=true \ - DEFRA_CLIENT_GO=true DEFRA_CLIENT_HTTP=true \ - $(MAKE) test:all - .PHONY: test\:gql-mutations test\:gql-mutations: DEFRA_MUTATION_TYPE=gql DEFRA_BADGER_MEMORY=true gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) @@ -206,12 +197,6 @@ test\:gql-mutations: # # For example, CreateDoc will call [Collection.Create], and # UpdateDoc will call [Collection.Update]. -.PHONY: test\:ci-col-named-mutations -test\:ci-col-named-mutations: - DEFRA_MUTATION_TYPE=collection-named DEFRA_BADGER_MEMORY=true \ - DEFRA_CLIENT_GO=true DEFRA_CLIENT_HTTP=true \ - $(MAKE) test:all - .PHONY: test\:col-named-mutations test\:col-named-mutations: DEFRA_MUTATION_TYPE=collection-named DEFRA_BADGER_MEMORY=true gotestsum --format pkgname -- $(DEFAULT_TEST_DIRECTORIES) @@ -224,6 +209,10 @@ test\:go: test\:http: DEFRA_CLIENT_HTTP=true go test $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) +.PHONY: test\:cli +test\:cli: + DEFRA_CLIENT_CLI=true go test $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) + .PHONY: test\:names test\:names: gotestsum --format testname -- $(DEFAULT_TEST_DIRECTORIES) $(TEST_FLAGS) @@ -232,7 +221,6 @@ test\:names: test\:all: @$(MAKE) test:names @$(MAKE) test:lens - @$(MAKE) test:cli .PHONY: test\:verbose test\:verbose: @@ -263,39 +251,33 @@ test\:lens: @$(MAKE) deps:lens gotestsum --format testname -- ./$(LENS_TEST_DIRECTORY)/... $(TEST_FLAGS) -.PHONY: test\:cli -test\:cli: - @$(MAKE) deps:lens - gotestsum --format testname -- ./$(CLI_TEST_DIRECTORY)/... $(TEST_FLAGS) - -# Using go-acc to ensure integration tests are included. -# Usage: `make test:coverage` or `make test:coverage path="{pathToPackage}"` -# Example: `make test:coverage path="./api/..."` .PHONY: test\:coverage test\:coverage: - @$(MAKE) deps:coverage + @$(MAKE) deps:lens + @$(MAKE) clean:coverage + mkdir $(COVERAGE_DIRECTORY) ifeq ($(path),) - go-acc ./... --output=coverage.txt --covermode=atomic -- -failfast -coverpkg=./... 
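This change swaps the `go-acc`-based coverage recipe for Go 1.20's binary coverage profiles: each test binary writes profiles into `$(COVERAGE_DIRECTORY)` via `-test.gocoverdir`, and `go tool covdata` merges them into the text format Codecov consumes. A condensed sketch of the new flow outside of make (race and timeout flags omitted for brevity):

```shell
# Run the suite with per-binary coverage output, then merge to text format.
mkdir -p coverage
gotestsum --format testname -- ./... -covermode=atomic -coverpkg=./... \
  -args -test.gocoverdir="$PWD/coverage"
go tool covdata textfmt -i="$PWD/coverage" -o coverage.txt
go tool cover -func=coverage.txt  # per-function summary, as in test:coverage-func
```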
- @echo "Show coverage information for each function in ./..." + gotestsum --format testname -- ./... $(TEST_FLAGS) $(COVERAGE_FLAGS) else - go-acc $(path) --output=coverage.txt --covermode=atomic -- -failfast -coverpkg=$(path) - @echo "Show coverage information for each function in" path=$(path) + gotestsum --format testname -- $(path) $(TEST_FLAGS) $(COVERAGE_FLAGS) endif - go tool cover -func coverage.txt | grep total | awk '{print $$3}' + go tool covdata textfmt -i=$(COVERAGE_DIRECTORY) -o $(COVERAGE_FILE) + +.PHONY: test\:coverage-func +test\:coverage-func: + @$(MAKE) test:coverage + go tool cover -func=$(COVERAGE_FILE) -# Usage: `make test:coverage-html` or `make test:coverage-html path="{pathToPackage}"` -# Example: `make test:coverage-html path="./api/..."` .PHONY: test\:coverage-html test\:coverage-html: @$(MAKE) test:coverage path=$(path) - @echo "Generate coverage information in HTML" - go tool cover -html=coverage.txt - rm ./coverage.txt + go tool cover -html=$(COVERAGE_FILE) + @$(MAKE) clean:coverage + .PHONY: test\:changes test\:changes: - @$(MAKE) deps:lens - env DEFRA_DETECT_DATABASE_CHANGES=true DEFRA_CLIENT_GO=true gotestsum -- ./... -shuffle=on -p 1 + gotestsum --format testname -- ./$(CHANGE_DETECTOR_TEST_DIRECTORY)/... -timeout 15m --tags change_detector .PHONY: validate\:codecov validate\:codecov: @@ -332,7 +314,7 @@ docs: .PHONY: docs\:cli docs\:cli: - go run cmd/genclidocs/genclidocs.go -o docs/cli/ + go run cmd/genclidocs/main.go -o docs/cli/ .PHONY: docs\:manpages docs\:manpages: diff --git a/README.md b/README.md index 8428ebc77f..d77dd18f48 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -![Tests Workflow](https://github.com/sourcenetwork/defradb/actions/workflows/run-tests.yml/badge.svg) +![Tests Workflow](https://github.com/sourcenetwork/defradb/actions/workflows/test-and-upload-coverage.yml/badge.svg) [![Go Report Card](https://goreportcard.com/badge/github.com/sourcenetwork/defradb)](https://goreportcard.com/report/github.com/sourcenetwork/defradb) [![codecov](https://codecov.io/gh/sourcenetwork/defradb/branch/develop/graph/badge.svg?token=RHAORX13PA)](https://codecov.io/gh/sourcenetwork/defradb) [![Discord](https://img.shields.io/discord/427944769851752448.svg?color=768AD4&label=discord&logo=https%3A%2F%2Fdiscordapp.com%2Fassets%2F8c9701b98ad4372b58f13fd9f65f966e.svg)](https://discord.source.network/) @@ -95,7 +95,7 @@ Find more examples of schema type definitions in the [examples/schema/](examples ## Create a document -Submit a `mutation` request to create an docuement of the `User` type: +Submit a `mutation` request to create a document of the `User` type: ```shell defradb client query ' @@ -244,13 +244,20 @@ When starting a node for the first time, a key pair is generated and stored in i Each node has a unique `PeerID` generated from its public key. This ID allows other nodes to connect to it. +To view your node's peer info: + +```shell +defradb client p2p info +``` + There are two types of peer-to-peer relationships supported: **pubsub** peering and **replicator** peering. -Pubsub peering *passively* synchronizes data between nodes by broadcasting *Document Commit* updates to the topic of the commit's document key. Nodes need to be listening on the pubsub channel to receive updates. This is for when two nodes *already* have share a document and want to keep them in sync. +Pubsub peering *passively* synchronizes data between nodes by broadcasting *Document Commit* updates to the topic of the commit's document key. 
Nodes need to be listening on the pubsub channel to receive updates. This is for when two nodes *already* share a document and want to keep it in sync.
 
 Replicator peering *actively* pushes changes from a specific collection *to* a target peer.
 
-### Pubsub example
+
+Pubsub example
 
 Pubsub peers can be specified on the command line using the `--peers` flag, which accepts a comma-separated list of peer [multiaddresses](https://docs.libp2p.io/concepts/addressing/). For example, a node at IP `192.168.1.12` listening on 9000 with PeerID `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B` would be referred to using the multiaddress `/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`.
@@ -258,16 +265,22 @@ Let's go through an example of two nodes (*nodeA* and *nodeB*) connecting with e
 
 Start *nodeA* with a default configuration:
 
-```
+```shell
 defradb start
 ```
 
-Obtain the PeerID from its console output. In this example, we use `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`, but locally it will be different.
+Obtain the node's peer info:
+
+```shell
+defradb client p2p info
+```
+
+In this example, we use `12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B`, but locally it will be different.
 
 For *nodeB*, we provide the following configuration:
 
-```
-defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --tcpaddr /ip4/0.0.0.0/tcp/9162 --peers /ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B
+```shell
+defradb start --rootdir ~/.defradb-nodeB --url localhost:9182 --p2paddr /ip4/0.0.0.0/tcp/9172 --peers /ip4/0.0.0.0/tcp/9171/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B
 ```
 
 About the flags:
@@ -275,26 +288,29 @@ About the flags:
 
 - `--rootdir` specifies the root dir (config and data) to use
 - `--url` is the address to listen on for the client HTTP and GraphQL API
 - `--p2paddr` is the multiaddress for the P2P networking to listen on
-- `--tcpaddr` is the multiaddress for the gRPC server to listen on
 - `--peers` is a comma-separated list of peer multiaddresses
 
 This starts two nodes and connects them via pubsub networking.
+
 
-### Collection subscription example
+
+Subscription example
 
-It is possible to subscribe to updates on a given collection by using its ID as the pubsub topic. The ID of a collection is found as the field `collectionID` in one of its documents. Here we use the collection ID of the `User` type we created above. After setting up 2 nodes as shown in the [Pubsub example](#pubsub-example) section, we can subscribe to collections updates on *nodeA* from *nodeB* by using the `rpc p2pcollection` command:
+It is possible to subscribe to updates on a given collection by using its ID as the pubsub topic. The ID of a collection is found as the field `collectionID` in one of its documents. Here we use the collection ID of the `User` type we created above. After setting up two nodes as shown in the [Pubsub example](#pubsub-example) section, we can subscribe to collection updates on *nodeA* from *nodeB* by using the following command:
 
 ```shell
-defradb client rpc p2pcollection add --url localhost:9182 bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske
+defradb client p2p collection add --url localhost:9182 bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske
 ```
 
 Multiple collection IDs can be added at once.
```shell -defradb client rpc p2pcollection add --url localhost:9182 +defradb client p2p collection add --url localhost:9182 ,, ``` + -### Replicator example + +Replicator example Replicator peering is targeted: it allows a node to actively send updates to another node. Let's go through an example of *nodeA* actively replicating to *nodeB*: @@ -334,14 +350,20 @@ defradb client schema add --url localhost:9182 ' ' ``` -Set *nodeA* to actively replicate the "Article" collection to *nodeB*: +Then copy the peer info from *nodeB*: ```shell -defradb client rpc replicator set -c "Article" /ip4/0.0.0.0/tcp/9172/p2p/ +defradb client p2p info --url localhost:9182 ``` -As we add or update documents in the "Article" collection on *nodeA*, they will be actively pushed to *nodeB*. Note that changes to *nodeB* will still be passively published back to *nodeA*, via pubsub. +Set *nodeA* to actively replicate the Article collection to *nodeB*: + +```shell +defradb client p2p replicator set -c Article +``` +As we add or update documents in the Article collection on *nodeA*, they will be actively pushed to *nodeB*. Note that changes to *nodeB* will still be passively published back to *nodeA*, via pubsub. + ## Securing the HTTP API with TLS diff --git a/api/http/errors.go b/api/http/errors.go deleted file mode 100644 index 4acf9abd25..0000000000 --- a/api/http/errors.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "context" - "fmt" - "net/http" - "os" - "strings" - - "github.com/sourcenetwork/defradb/errors" -) - -var env = os.Getenv("DEFRA_ENV") - -// Errors returnable from this package. -// -// This list is incomplete. Undefined errors may also be returned. -// Errors returned from this package may be tested against these errors with errors.Is. -var ( - ErrNoListener = errors.New("cannot serve with no listener") - ErrSchema = errors.New("base must start with the http or https scheme") - ErrDatabaseNotAvailable = errors.New("no database available") - ErrFormNotSupported = errors.New("content type application/x-www-form-urlencoded not yet supported") - ErrBodyEmpty = errors.New("body cannot be empty") - ErrMissingGQLRequest = errors.New("missing GraphQL request") - ErrPeerIdUnavailable = errors.New("no PeerID available. P2P might be disabled") - ErrStreamingUnsupported = errors.New("streaming unsupported") - ErrNoEmail = errors.New("email address must be specified for tls with autocert") - ErrPayloadFormat = errors.New("invalid payload format") - ErrMissingNewKey = errors.New("missing _newKey for imported doc") -) - -// ErrorResponse is the GQL top level object holding error items for the response payload. -type ErrorResponse struct { - Errors []ErrorItem `json:"errors"` -} - -// ErrorItem hold an error message and extensions that might be pertinent to the request. 
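// NOTE: serialized, an ErrorItem renders roughly as
// {"message": "...", "extensions": {"status": 400, "httpError": "Bad Request", "stack": "..."}},
// where "stack" is only populated in dev environments (see formatError below).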
-type ErrorItem struct { - Message string `json:"message"` - Extensions extensions `json:"extensions,omitempty"` -} - -type extensions struct { - Status int `json:"status"` - HTTPError string `json:"httpError"` - Stack string `json:"stack,omitempty"` -} - -func handleErr(ctx context.Context, rw http.ResponseWriter, err error, status int) { - if status == http.StatusInternalServerError { - log.ErrorE(ctx, http.StatusText(status), err) - } - - sendJSON( - ctx, - rw, - ErrorResponse{ - Errors: []ErrorItem{ - { - Message: err.Error(), - Extensions: extensions{ - Status: status, - HTTPError: http.StatusText(status), - Stack: formatError(err), - }, - }, - }, - }, - status, - ) -} - -func formatError(err error) string { - if strings.ToLower(env) == "dev" || strings.ToLower(env) == "development" { - return fmt.Sprintf("[DEV] %+v\n", err) - } - return "" -} diff --git a/api/http/errors_test.go b/api/http/errors_test.go deleted file mode 100644 index 9e4a5885c8..0000000000 --- a/api/http/errors_test.go +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "encoding/json" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" -) - -func CleanupEnv() { - env = "" -} - -func TestFormatError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "prod" - s := formatError(errors.New("test error")) - assert.Equal(t, "", s) - - env = "dev" - s = formatError(errors.New("test error")) - lines := strings.Split(s, "\n") - assert.Equal(t, "[DEV] test error", lines[0]) -} - -func TestHandleErrOnBadRequest(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - f := func(rw http.ResponseWriter, req *http.Request) { - handleErr(req.Context(), rw, errors.New("test error"), http.StatusBadRequest) - } - req, err := http.NewRequest("GET", "/test", nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - - f(rec, req) - - resp := rec.Result() - - errResponse := ErrorResponse{} - err = json.NewDecoder(resp.Body).Decode(&errResponse) - if err != nil { - t.Fatal(err) - } - - if len(errResponse.Errors) != 1 { - t.Fatal("expecting exactly one error") - } - - assert.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, http.StatusText(http.StatusBadRequest), errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "test error", errResponse.Errors[0].Message) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "[DEV] test error") -} - -func TestHandleErrOnInternalServerError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - f := func(rw http.ResponseWriter, req *http.Request) { - handleErr(req.Context(), rw, errors.New("test error"), http.StatusInternalServerError) - } - req, err := http.NewRequest("GET", "/test", nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - - f(rec, req) - - resp := rec.Result() - - errResponse := ErrorResponse{} - err = json.NewDecoder(resp.Body).Decode(&errResponse) - if err != nil { - t.Fatal(err) - } - - if len(errResponse.Errors) != 1 { - t.Fatal("expecting exactly one error") - } - assert.Equal(t, http.StatusInternalServerError, 
errResponse.Errors[0].Extensions.Status) - assert.Equal(t, http.StatusText(http.StatusInternalServerError), errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "test error", errResponse.Errors[0].Message) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "[DEV] test error") -} - -func TestHandleErrOnNotFound(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - f := func(rw http.ResponseWriter, req *http.Request) { - handleErr(req.Context(), rw, errors.New("test error"), http.StatusNotFound) - } - req, err := http.NewRequest("GET", "/test", nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - - f(rec, req) - - resp := rec.Result() - - errResponse := ErrorResponse{} - err = json.NewDecoder(resp.Body).Decode(&errResponse) - if err != nil { - t.Fatal(err) - } - - if len(errResponse.Errors) != 1 { - t.Fatal("expecting exactly one error") - } - - assert.Equal(t, http.StatusNotFound, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, http.StatusText(http.StatusNotFound), errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "test error", errResponse.Errors[0].Message) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "[DEV] test error") -} - -func TestHandleErrOnDefault(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - f := func(rw http.ResponseWriter, req *http.Request) { - handleErr(req.Context(), rw, errors.New("unauthorized"), http.StatusUnauthorized) - } - req, err := http.NewRequest("GET", "/test", nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - - f(rec, req) - - resp := rec.Result() - - errResponse := ErrorResponse{} - err = json.NewDecoder(resp.Body).Decode(&errResponse) - if err != nil { - t.Fatal(err) - } - - if len(errResponse.Errors) != 1 { - t.Fatal("expecting exactly one error") - } - - assert.Equal(t, http.StatusUnauthorized, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, http.StatusText(http.StatusUnauthorized), errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "unauthorized", errResponse.Errors[0].Message) - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "[DEV] unauthorized") -} diff --git a/api/http/handler.go b/api/http/handler.go deleted file mode 100644 index aa7b828f29..0000000000 --- a/api/http/handler.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "context" - "encoding/json" - "fmt" - "io" - "net/http" - - "github.com/go-chi/chi/v5" - "github.com/go-chi/cors" - "github.com/pkg/errors" - - "github.com/sourcenetwork/defradb/client" -) - -type handler struct { - db client.DB - *chi.Mux - - // user configurable options - options serverOptions -} - -// context variables -type ( - ctxDB struct{} - ctxPeerID struct{} -) - -// DataResponse is the GQL top level object holding data for the response payload. -type DataResponse struct { - Data any `json:"data"` -} - -// simpleDataResponse is a helper function that returns a DataResponse struct. -// Odd arguments are the keys and must be strings otherwise they are ignored. -// Even arguments are the values associated with the previous key. 
-// Odd arguments are also ignored if there are no following arguments. -func simpleDataResponse(args ...any) DataResponse { - data := make(map[string]any) - - for i := 0; i < len(args); i += 2 { - if len(args) >= i+2 { - switch a := args[i].(type) { - case string: - data[a] = args[i+1] - - default: - continue - } - } - } - - return DataResponse{ - Data: data, - } -} - -// newHandler returns a handler with the router instantiated. -func newHandler(db client.DB, opts serverOptions) *handler { - mux := chi.NewRouter() - mux.Use(loggerMiddleware) - - if len(opts.allowedOrigins) != 0 { - mux.Use(cors.Handler(cors.Options{ - AllowedOrigins: opts.allowedOrigins, - AllowedMethods: []string{"GET", "POST", "PATCH", "OPTIONS"}, - AllowedHeaders: []string{"Content-Type"}, - MaxAge: 300, - })) - } - - mux.Use(func(next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - if opts.tls.HasValue() { - rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") - } - ctx := context.WithValue(req.Context(), ctxDB{}, db) - if opts.peerID != "" { - ctx = context.WithValue(ctx, ctxPeerID{}, opts.peerID) - } - next.ServeHTTP(rw, req.WithContext(ctx)) - }) - }) - - return setRoutes(&handler{ - Mux: mux, - db: db, - options: opts, - }) -} - -func getJSON(req *http.Request, v any) error { - err := json.NewDecoder(req.Body).Decode(v) - if err != nil { - return errors.Wrap(err, "unmarshal error") - } - return nil -} - -func sendJSON(ctx context.Context, rw http.ResponseWriter, v any, code int) { - rw.Header().Set("Content-Type", "application/json") - - b, err := json.Marshal(v) - if err != nil { - log.Error(ctx, fmt.Sprintf("Error while encoding JSON: %v", err)) - rw.WriteHeader(http.StatusInternalServerError) - if _, err := io.WriteString(rw, `{"error": "Internal server error"}`); err != nil { - log.Error(ctx, err.Error()) - } - return - } - - rw.WriteHeader(code) - if _, err = rw.Write(b); err != nil { - rw.WriteHeader(http.StatusInternalServerError) - log.Error(ctx, err.Error()) - } -} - -func dbFromContext(ctx context.Context) (client.DB, error) { - db, ok := ctx.Value(ctxDB{}).(client.DB) - if !ok { - return nil, ErrDatabaseNotAvailable - } - - return db, nil -} diff --git a/api/http/handler_test.go b/api/http/handler_test.go deleted file mode 100644 index 2015c7a0ba..0000000000 --- a/api/http/handler_test.go +++ /dev/null @@ -1,312 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package http - -import ( - "bytes" - "context" - "io" - "math" - "net/http" - "net/http/httptest" - "path" - "testing" - - badger "github.com/dgraph-io/badger/v4" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/db" - "github.com/sourcenetwork/defradb/logging" -) - -func TestSimpleDataResponse(t *testing.T) { - resp := simpleDataResponse("key", "value", "key2", "value2") - switch v := resp.Data.(type) { - case map[string]any: - assert.Equal(t, "value", v["key"]) - assert.Equal(t, "value2", v["key2"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } - - resp2 := simpleDataResponse("key", "value", "key2") - switch v := resp2.Data.(type) { - case map[string]any: - assert.Equal(t, "value", v["key"]) - assert.Equal(t, nil, v["key2"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } - - resp3 := simpleDataResponse("key", "value", 2, "value2") - switch v := resp3.Data.(type) { - case map[string]any: - assert.Equal(t, "value", v["key"]) - assert.Equal(t, nil, v["2"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestNewHandlerWithLogger(t *testing.T) { - h := newHandler(nil, serverOptions{}) - - dir := t.TempDir() - - // send logs to temp file so we can inspect it - logFile := path.Join(dir, "http_test.log") - log.ApplyConfig(logging.Config{ - EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), - OutputPaths: []string{logFile}, - }) - - req, err := http.NewRequest("GET", PingPath, nil) - if err != nil { - t.Fatal(err) - } - - rec := httptest.NewRecorder() - lrw := newLoggingResponseWriter(rec) - h.ServeHTTP(lrw, req) - assert.Equal(t, 200, rec.Result().StatusCode) - - // inspect the log file - kv, err := readLog(logFile) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "http", kv["logger"]) -} - -func TestGetJSON(t *testing.T) { - var obj struct { - Name string - } - - jsonStr := ` -{ - "Name": "John Doe" -}` - - req, err := http.NewRequest("POST", "/ping", bytes.NewBuffer([]byte(jsonStr))) - if err != nil { - t.Fatal(err) - } - - err = getJSON(req, &obj) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "John Doe", obj.Name) -} - -func TestGetJSONWithError(t *testing.T) { - var obj struct { - Name string - } - - jsonStr := ` -{ - "Name": 10 -}` - - req, err := http.NewRequest("POST", "/ping", bytes.NewBuffer([]byte(jsonStr))) - if err != nil { - t.Fatal(err) - } - - err = getJSON(req, &obj) - assert.Error(t, err) -} - -func TestSendJSONWithNoErrors(t *testing.T) { - obj := struct { - Name string - }{ - Name: "John Doe", - } - - rec := httptest.NewRecorder() - - sendJSON(context.Background(), rec, obj, 200) - - body, err := io.ReadAll(rec.Result().Body) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, []byte("{\"Name\":\"John Doe\"}"), body) -} - -func TestSendJSONWithMarshallFailure(t *testing.T) { - rec := httptest.NewRecorder() - - sendJSON(context.Background(), rec, math.Inf(1), 200) - - assert.Equal(t, http.StatusInternalServerError, rec.Result().StatusCode) -} - -type loggerTest struct { - loggingResponseWriter -} - -func (lt *loggerTest) Write(b []byte) (int, error) { - return 0, errors.New("this write will fail") -} - -func TestSendJSONWithMarshallFailureAndWriteFailer(t *testing.T) { - rec := httptest.NewRecorder() - lrw := loggerTest{} - lrw.ResponseWriter = rec - - sendJSON(context.Background(), &lrw, 
math.Inf(1), 200) - - assert.Equal(t, http.StatusInternalServerError, rec.Result().StatusCode) -} - -func TestSendJSONWithWriteFailure(t *testing.T) { - obj := struct { - Name string - }{ - Name: "John Doe", - } - - rec := httptest.NewRecorder() - lrw := loggerTest{} - lrw.ResponseWriter = rec - - sendJSON(context.Background(), &lrw, obj, 200) - - assert.Equal(t, http.StatusInternalServerError, lrw.statusCode) -} - -func TestDbFromContext(t *testing.T) { - _, err := dbFromContext(context.Background()) - assert.Error(t, err) - - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} - rootstore, err := badgerds.NewDatastore("", &opts) - if err != nil { - t.Fatal(err) - } - - var options []db.Option - ctx := context.Background() - - defra, err := db.NewDB(ctx, rootstore, options...) - if err != nil { - t.Fatal(err) - } - - reqCtx := context.WithValue(ctx, ctxDB{}, defra) - - _, err = dbFromContext(reqCtx) - assert.NoError(t, err) -} - -func TestCORSRequest(t *testing.T) { - cases := []struct { - name string - method string - reqHeaders map[string]string - resHeaders map[string]string - }{ - { - "DisallowedOrigin", - "OPTIONS", - map[string]string{ - "Origin": "https://notsource.network", - }, - map[string]string{ - "Vary": "Origin", - }, - }, - { - "AllowedOrigin", - "OPTIONS", - map[string]string{ - "Origin": "https://source.network", - }, - map[string]string{ - "Access-Control-Allow-Origin": "https://source.network", - "Vary": "Origin", - }, - }, - } - - s := NewServer(nil, WithAllowedOrigins("https://source.network")) - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - req, err := http.NewRequest(c.method, PingPath, nil) - if err != nil { - t.Fatal(err) - } - - for header, value := range c.reqHeaders { - req.Header.Add(header, value) - } - - rec := httptest.NewRecorder() - - s.Handler.ServeHTTP(rec, req) - - for header, value := range c.resHeaders { - assert.Equal(t, value, rec.Result().Header.Get(header)) - } - }) - } -} - -func TestTLSRequestResponseHeader(t *testing.T) { - cases := []struct { - name string - method string - reqHeaders map[string]string - resHeaders map[string]string - }{ - { - "TLSHeader", - "GET", - map[string]string{}, - map[string]string{ - "Strict-Transport-Security": "max-age=63072000; includeSubDomains", - }, - }, - } - dir := t.TempDir() - - s := NewServer(nil, WithTLS(), WithAddress("example.com"), WithRootDir(dir)) - - for _, c := range cases { - t.Run(c.name, func(t *testing.T) { - req, err := http.NewRequest(c.method, PingPath, nil) - if err != nil { - t.Fatal(err) - } - - for header, value := range c.reqHeaders { - req.Header.Add(header, value) - } - - rec := httptest.NewRecorder() - - s.Handler.ServeHTTP(rec, req) - - for header, value := range c.resHeaders { - assert.Equal(t, value, rec.Result().Header.Get(header)) - } - }) - } -} diff --git a/api/http/handlerfuncs.go b/api/http/handlerfuncs.go deleted file mode 100644 index e4163de05f..0000000000 --- a/api/http/handlerfuncs.go +++ /dev/null @@ -1,472 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package http - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "mime" - "net/http" - - "github.com/go-chi/chi/v5" - dshelp "github.com/ipfs/boxo/datastore/dshelp" - dag "github.com/ipfs/boxo/ipld/merkledag" - "github.com/ipfs/go-cid" - ds "github.com/ipfs/go-datastore" - "github.com/multiformats/go-multihash" - - "github.com/sourcenetwork/defradb/client" - corecrdt "github.com/sourcenetwork/defradb/core/crdt" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/events" -) - -const ( - contentTypeJSON = "application/json" - contentTypeGraphQL = "application/graphql" - contentTypeFormURLEncoded = "application/x-www-form-urlencoded" -) - -func rootHandler(rw http.ResponseWriter, req *http.Request) { - sendJSON( - req.Context(), - rw, - simpleDataResponse( - "response", "Welcome to the DefraDB HTTP API. Use /graphql to send queries to the database."+ - " Read the documentation at https://docs.source.network/.", - ), - http.StatusOK, - ) -} - -func pingHandler(rw http.ResponseWriter, req *http.Request) { - sendJSON( - req.Context(), - rw, - simpleDataResponse("response", "pong"), - http.StatusOK, - ) -} - -func dumpHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - err = db.PrintDump(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("response", "ok"), - http.StatusOK, - ) -} - -type gqlRequest struct { - Request string `json:"query"` -} - -func execGQLHandler(rw http.ResponseWriter, req *http.Request) { - request := req.URL.Query().Get("query") - if request == "" { - // extract the media type from the content-type header - contentType, _, err := mime.ParseMediaType(req.Header.Get("Content-Type")) - // mime.ParseMediaType will return an error (mime: no media type) - // if there is no media type set (i.e. application/json). - // This however is not a failing condition as not setting the content-type header - // should still make for a valid request and hit our default switch case. 
- if err != nil && err.Error() != "mime: no media type" { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - switch contentType { - case contentTypeJSON: - gqlReq := gqlRequest{} - - err := getJSON(req, &gqlReq) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - request = gqlReq.Request - - case contentTypeFormURLEncoded: - handleErr( - req.Context(), - rw, - ErrFormNotSupported, - http.StatusBadRequest, - ) - return - - case contentTypeGraphQL: - fallthrough - - default: - if req.Body == nil { - handleErr(req.Context(), rw, ErrBodyEmpty, http.StatusBadRequest) - return - } - body, err := readWithLimit(req.Body, rw) - if err != nil { - handleErr(req.Context(), rw, errors.WithStack(err), http.StatusInternalServerError) - return - } - request = string(body) - } - } - - // if at this point request is still empty, return an error - if request == "" { - handleErr(req.Context(), rw, ErrMissingGQLRequest, http.StatusBadRequest) - return - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - result := db.ExecRequest(req.Context(), request) - - if result.Pub != nil { - subscriptionHandler(result.Pub, rw, req) - return - } - - sendJSON(req.Context(), rw, newGQLResult(result.GQL), http.StatusOK) -} - -type fieldResponse struct { - ID string `json:"id"` - Name string `json:"name"` - Kind string `json:"kind"` - Internal bool `json:"internal"` -} - -type collectionResponse struct { - Name string `json:"name"` - ID string `json:"id"` - VersionID string `json:"version_id"` - Fields []fieldResponse `json:"fields,omitempty"` -} - -func listSchemaHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - cols, err := db.GetAllCollections(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - colResp := make([]collectionResponse, len(cols)) - for i, col := range cols { - var fields []fieldResponse - for _, field := range col.Schema().Fields { - fieldRes := fieldResponse{ - ID: field.ID.String(), - Name: field.Name, - Internal: field.IsInternal(), - } - if field.IsObjectArray() { - fieldRes.Kind = fmt.Sprintf("[%s]", field.Schema) - } else if field.IsObject() { - fieldRes.Kind = field.Schema - } else { - fieldRes.Kind = field.Kind.String() - } - fields = append(fields, fieldRes) - } - colResp[i] = collectionResponse{ - Name: col.Name(), - ID: col.SchemaID(), - VersionID: col.Schema().VersionID, - Fields: fields, - } - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("collections", colResp), - http.StatusOK, - ) -} - -func loadSchemaHandler(rw http.ResponseWriter, req *http.Request) { - sdl, err := readWithLimit(req.Body, rw) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - colDescs, err := db.AddSchema(req.Context(), string(sdl)) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - colResp := make([]collectionResponse, len(colDescs)) - for i, desc := range colDescs { - col, err := db.GetCollectionByName(req.Context(), desc.Name) - if err != nil { - handleErr(req.Context(), rw, err, 
http.StatusInternalServerError) - return - } - colResp[i] = collectionResponse{ - Name: col.Name(), - ID: col.SchemaID(), - VersionID: col.Schema().VersionID, - } - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success", "collections", colResp), - http.StatusOK, - ) -} - -func patchSchemaHandler(rw http.ResponseWriter, req *http.Request) { - patch, err := readWithLimit(req.Body, rw) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - err = db.PatchSchema(req.Context(), string(patch)) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func setMigrationHandler(rw http.ResponseWriter, req *http.Request) { - cfgStr, err := readWithLimit(req.Body, rw) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - var cfg client.LensConfig - err = json.Unmarshal(cfgStr, &cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - err = db.LensRegistry().SetMigration(req.Context(), cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func getMigrationHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - cfgs, err := db.LensRegistry().Config(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("configuration", cfgs), - http.StatusOK, - ) -} - -func getBlockHandler(rw http.ResponseWriter, req *http.Request) { - cidStr := chi.URLParam(req, "cid") - - // try to parse CID - cID, err := cid.Decode(cidStr) - if err != nil { - // If we can't try to parse DSKeyToCID - // return error if we still can't - key := ds.NewKey(cidStr) - var hash multihash.Multihash - hash, err = dshelp.DsKeyToMultihash(key) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - cID = cid.NewCidV1(cid.Raw, hash) - } - - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - block, err := db.Blockstore().Get(req.Context(), cID) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - nd, err := dag.DecodeProtobuf(block.RawData()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - buf, err := nd.MarshalJSON() - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - reg := corecrdt.LWWRegister{} - delta, err := reg.DeltaDecode(nd) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - data, err := delta.Marshal() - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - 
sendJSON(
-		req.Context(),
-		rw,
-		simpleDataResponse(
-			"block", string(buf),
-			"delta", string(data),
-			"val", delta.Value(),
-		),
-		http.StatusOK,
-	)
-}
-
-func peerIDHandler(rw http.ResponseWriter, req *http.Request) {
-	peerID, ok := req.Context().Value(ctxPeerID{}).(string)
-	if !ok || peerID == "" {
-		handleErr(req.Context(), rw, ErrPeerIdUnavailable, http.StatusNotFound)
-		return
-	}
-
-	sendJSON(
-		req.Context(),
-		rw,
-		simpleDataResponse(
-			"peerID", peerID,
-		),
-		http.StatusOK,
-	)
-}
-
-func subscriptionHandler(pub *events.Publisher[events.Update], rw http.ResponseWriter, req *http.Request) {
-	flusher, ok := rw.(http.Flusher)
-	if !ok {
-		handleErr(req.Context(), rw, ErrStreamingUnsupported, http.StatusInternalServerError)
-		return
-	}
-
-	rw.Header().Set("Content-Type", "text/event-stream")
-	rw.Header().Set("Cache-Control", "no-cache")
-	rw.Header().Set("Connection", "keep-alive")
-
-	for {
-		select {
-		case <-req.Context().Done():
-			pub.Unsubscribe()
-			return
-		case s, open := <-pub.Stream():
-			if !open {
-				return
-			}
-			b, err := json.Marshal(s)
-			if err != nil {
-				handleErr(req.Context(), rw, err, http.StatusInternalServerError)
-				return
-			}
-			fmt.Fprintf(rw, "data: %s\n\n", b)
-			flusher.Flush()
-		}
-	}
-}
-
-// maxBytes is an arbitrary limit to prevent unbounded message bodies being sent and read.
-const maxBytes int64 = 100 * (1 << (10 * 2)) // 100MB
-
-// readWithLimit reads from the reader until either EOF or the maximum number of bytes have been read.
-func readWithLimit(reader io.ReadCloser, rw http.ResponseWriter) ([]byte, error) {
-	reader = http.MaxBytesReader(rw, reader, maxBytes)
-
-	var buf bytes.Buffer
-	_, err := io.Copy(&buf, reader)
-	if err != nil {
-		return nil, err
-	}
-
-	return buf.Bytes(), nil
-}
diff --git a/api/http/handlerfuncs_backup.go b/api/http/handlerfuncs_backup.go
deleted file mode 100644
index 3961263995..0000000000
--- a/api/http/handlerfuncs_backup.go
+++ /dev/null
@@ -1,123 +0,0 @@
-// Copyright 2023 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
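execGQLHandler above accepts a GraphQL request in three shapes: a ?query URL parameter, a raw body (application/graphql or an unset content type, read through readWithLimit), or a JSON envelope decoded into gqlRequest via its "query" field; form-encoded bodies are rejected. A hypothetical client-side sketch; the server address and route literal are assumptions, standing in for whatever GraphQLPath resolves to:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// Assumed endpoint; the package refers to the route as GraphQLPath.
	const endpoint = "http://localhost:9181/api/v0/graphql"
	gql := `query { User { _key name } }`

	// Raw body: the handler's default case reads it via readWithLimit.
	raw, _ := http.NewRequest(http.MethodPost, endpoint, strings.NewReader(gql))
	raw.Header.Set("Content-Type", "application/graphql")

	// JSON envelope: decoded into gqlRequest, whose "query" field carries the request.
	envelope, _ := http.NewRequest(http.MethodPost, endpoint,
		strings.NewReader(fmt.Sprintf(`{"query": %q}`, gql)))
	envelope.Header.Set("Content-Type", "application/json")

	for _, req := range []*http.Request{raw, envelope} {
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			fmt.Println("request failed:", err)
			continue
		}
		body, _ := io.ReadAll(resp.Body)
		resp.Body.Close()
		fmt.Println(resp.Status, string(body))
	}
}
```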
- -package http - -import ( - "context" - "net/http" - "os" - "strings" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" -) - -func exportHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - cfg := &client.BackupConfig{} - err = getJSON(req, cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - err = validateBackupConfig(req.Context(), cfg, db) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - err = db.BasicExport(req.Context(), cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func importHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - cfg := &client.BackupConfig{} - err = getJSON(req, cfg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - err = validateBackupConfig(req.Context(), cfg, db) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - err = db.BasicImport(req.Context(), cfg.Filepath) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func validateBackupConfig(ctx context.Context, cfg *client.BackupConfig, db client.DB) error { - if !isValidPath(cfg.Filepath) { - return errors.New("invalid file path") - } - - if cfg.Format != "" && strings.ToLower(cfg.Format) != "json" { - return errors.New("only JSON format is supported at the moment") - } - for _, colName := range cfg.Collections { - _, err := db.GetCollectionByName(ctx, colName) - if err != nil { - return errors.Wrap("collection does not exist", err) - } - } - return nil -} - -func isValidPath(filepath string) bool { - // if a file exists, return true - if _, err := os.Stat(filepath); err == nil { - return true - } - - // if not, attempt to write to the path and if successful, - // remove the file and return true - var d []byte - if err := os.WriteFile(filepath, d, 0o644); err == nil { - _ = os.Remove(filepath) - return true - } - - return false -} diff --git a/api/http/handlerfuncs_backup_test.go b/api/http/handlerfuncs_backup_test.go deleted file mode 100644 index 67af6015a1..0000000000 --- a/api/http/handlerfuncs_backup_test.go +++ /dev/null @@ -1,623 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
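exportHandler and importHandler above both decode a client.BackupConfig from the request body, and validateBackupConfig rejects any format other than JSON as well as collection names that do not resolve through GetCollectionByName. A hedged sketch of a valid export call; the endpoint literal and the JSON field tags are assumptions (the tests refer to the route as ExportPath and marshal client.BackupConfig directly):

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// backupConfig mirrors the client.BackupConfig fields the handlers above use;
// the JSON tags here are assumptions, not taken from the diff.
type backupConfig struct {
	Filepath    string   `json:"filepath"`
	Format      string   `json:"format"`
	Collections []string `json:"collections"`
}

func main() {
	cfg := backupConfig{
		Filepath:    "/tmp/defradb-backup.json",
		Format:      "json",           // anything other than "json" fails validation
		Collections: []string{"User"}, // each name must resolve to an existing collection
	}
	body, _ := json.Marshal(cfg)

	// Endpoint literal is an assumption standing in for ExportPath.
	resp, err := http.Post("http://localhost:9181/api/v0/backup/export",
		"application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("export request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // a successful export responds with {"data":{"result":"success"}}
}
```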
-
-package http
-
-import (
-	"bytes"
-	"context"
-	"encoding/json"
-	"net/http"
-	"os"
-	"testing"
-
-	"github.com/stretchr/testify/mock"
-	"github.com/stretchr/testify/require"
-
-	"github.com/sourcenetwork/defradb/client"
-	"github.com/sourcenetwork/defradb/client/mocks"
-	"github.com/sourcenetwork/defradb/errors"
-)
-
-func TestExportHandler_WithNoDB_NoDatabaseAvailableError(t *testing.T) {
-	t.Cleanup(CleanupEnv)
-	env = "dev"
-	errResponse := ErrorResponse{}
-	testRequest(testOptions{
-		Testing:        t,
-		DB:             nil,
-		Method:         "POST",
-		Path:           ExportPath,
-		Body:           nil,
-		ExpectedStatus: 500,
-		ResponseData:   &errResponse,
-	})
-	require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available")
-	require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status)
-	require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError)
-	require.Equal(t, "no database available", errResponse.Errors[0].Message)
-}
-
-func TestExportHandler_WithWrongPayload_ReturnError(t *testing.T) {
-	t.Cleanup(CleanupEnv)
-	env = "dev"
-	ctx := context.Background()
-	defra := testNewInMemoryDB(t, ctx)
-	defer defra.Close(ctx)
-
-	buf := bytes.NewBuffer([]byte("[]"))
-	errResponse := ErrorResponse{}
-	testRequest(testOptions{
-		Testing:        t,
-		DB:             defra,
-		Method:         "POST",
-		Path:           ExportPath,
-		Body:           buf,
-		ExpectedStatus: 400,
-		ResponseData:   &errResponse,
-	})
-	require.Contains(t, errResponse.Errors[0].Extensions.Stack, "json: cannot unmarshal array into Go value of type client.BackupConfig")
-	require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status)
-	require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError)
-	require.Equal(t, "unmarshal error: json: cannot unmarshal array into Go value of type client.BackupConfig", errResponse.Errors[0].Message)
-}
-
-func TestExportHandler_WithInvalidFilePath_ReturnError(t *testing.T) {
-	t.Cleanup(CleanupEnv)
-	env = "dev"
-	ctx := context.Background()
-	defra := testNewInMemoryDB(t, ctx)
-	defer defra.Close(ctx)
-
-	filepath := t.TempDir() + "/some/test.json"
-	cfg := client.BackupConfig{
-		Filepath: filepath,
-	}
-	b, err := json.Marshal(cfg)
-	require.NoError(t, err)
-	buf := bytes.NewBuffer(b)
-
-	errResponse := ErrorResponse{}
-	testRequest(testOptions{
-		Testing:        t,
-		DB:             defra,
-		Method:         "POST",
-		Path:           ExportPath,
-		Body:           buf,
-		ExpectedStatus: 400,
-		ResponseData:   &errResponse,
-	})
-	require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid file path")
-	require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status)
-	require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError)
-	require.Equal(t, "invalid file path", errResponse.Errors[0].Message)
-}
-
-func TestExportHandler_WithInvalidFormat_ReturnError(t *testing.T) {
-	t.Cleanup(CleanupEnv)
-	env = "dev"
-	ctx := context.Background()
-	defra := testNewInMemoryDB(t, ctx)
-	defer defra.Close(ctx)
-
-	filepath := t.TempDir() + "/test.json"
-	cfg := client.BackupConfig{
-		Filepath: filepath,
-		Format:   "csv",
-	}
-	b, err := json.Marshal(cfg)
-	require.NoError(t, err)
-	buf := bytes.NewBuffer(b)
-
-	errResponse := ErrorResponse{}
-	testRequest(testOptions{
-		Testing:        t,
-		DB:             defra,
-		Method:         "POST",
-		Path:           ExportPath,
-		Body:           buf,
-		ExpectedStatus: 400,
-		ResponseData:   &errResponse,
-	})
-	require.Contains(t, errResponse.Errors[0].Extensions.Stack, "only JSON format is supported at the moment")
-	require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status)
-	
require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "only JSON format is supported at the moment", errResponse.Errors[0].Message) -} - -func TestExportHandler_WithInvalidCollection_ReturnError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - Format: "json", - Collections: []string{"invalid"}, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "collection does not exist: datastore: key not found") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "collection does not exist: datastore: key not found", errResponse.Errors[0].Message) -} - -func TestExportHandler_WithBasicExportError_ReturnError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - db := mocks.NewDB(t) - testError := errors.New("test error") - db.EXPECT().BasicExport(mock.Anything, mock.Anything).Return(testError) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: db, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "test error") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "test error", errResponse.Errors[0].Message) -} - -func TestExportHandler_AllCollections_NoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - col, err := defra.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - respBody := testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 200, - }) - - b, err = os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{"data":{"result":"success"}}`, - string(respBody), - ) - - require.Equal( - t, - `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`, - string(b), - ) -} - -func TestExportHandler_UserCollection_NoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - col, err := defra.GetCollectionByName(ctx, "User") - 
require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - Collections: []string{"User"}, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - respBody := testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 200, - }) - - b, err = os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{"data":{"result":"success"}}`, - string(respBody), - ) - - require.Equal( - t, - `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`, - string(b), - ) -} - -func TestExportHandler_UserCollectionWithModifiedDoc_NoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - col, err := defra.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - err = doc.Set("points", 1000) - require.NoError(t, err) - - err = col.Update(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - Collections: []string{"User"}, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - respBody := testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ExportPath, - Body: buf, - ExpectedStatus: 200, - }) - - b, err = os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{"data":{"result":"success"}}`, - string(respBody), - ) - - require.Equal( - t, - `{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-36697142-d46a-57b1-b25e-6336706854ea","age":31,"name":"Bob","points":1000,"verified":true}]}`, - string(b), - ) -} - -func TestImportHandler_WithNoDB_NoDatabaseAvailableError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: ImportPath, - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestImportHandler_WithWrongPayloadFormat_UnmarshalError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - buf := bytes.NewBuffer([]byte(`[]`)) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - require.Contains( - t, - errResponse.Errors[0].Extensions.Stack, - "json: cannot unmarshal array into Go value of type client.BackupConfig", - ) - require.Equal(t, http.StatusBadRequest, 
errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal( - t, - "unmarshal error: json: cannot unmarshal array into Go value of type client.BackupConfig", - errResponse.Errors[0].Message, - ) -} - -func TestImportHandler_WithInvalidFilepath_ReturnError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - filepath := t.TempDir() + "/some/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid file path") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "invalid file path", errResponse.Errors[0].Message) -} - -func TestImportHandler_WithDBClosed_DatastoreClosedError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defra.Close(ctx) - - filepath := t.TempDir() + "/test.json" - cfg := client.BackupConfig{ - Filepath: filepath, - } - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "datastore closed") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "datastore closed", errResponse.Errors[0].Message) -} - -func TestImportHandler_WithUnknownCollection_KeyNotFoundError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - filepath := t.TempDir() + "/test.json" - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), - 0644, - ) - require.NoError(t, err) - - cfg := client.BackupConfig{ - Filepath: filepath, - } - - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "failed to get collection: datastore: key not found. Name: User") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "failed to get collection: datastore: key not found. 
Name: User", errResponse.Errors[0].Message) -} - -func TestImportHandler_UserCollection_NoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - filepath := t.TempDir() + "/test.json" - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), - 0644, - ) - require.NoError(t, err) - - cfg := client.BackupConfig{ - Filepath: filepath, - } - - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - resp := DataResponse{} - _ = testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - Body: buf, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "success", v["result"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - col, err := defra.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - importedDoc, err := col.Get(ctx, doc.Key(), false) - require.NoError(t, err) - - require.Equal(t, doc.Key().String(), importedDoc.Key().String()) -} - -func TestImportHandler_WithExistingDoc_DocumentExistError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - testLoadSchema(t, ctx, defra) - - col, err := defra.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"age": 31, "verified": true, "points": 90, "name": "Bob"}`)) - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - err = os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","_newKey":"bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab","age":31,"name":"Bob","points":90,"verified":true}]}`), - 0644, - ) - require.NoError(t, err) - - cfg := client.BackupConfig{ - Filepath: filepath, - } - - b, err := json.Marshal(cfg) - require.NoError(t, err) - buf := bytes.NewBuffer(b) - - errResponse := ErrorResponse{} - _ = testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: ImportPath, - QueryParams: map[string]string{"collections": "User"}, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains( - t, - errResponse.Errors[0].Extensions.Stack, - "failed to save a new doc to collection: a document with the given dockey already exists", - ) - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal( - t, - "failed to save a new doc to collection: a document with the given dockey already exists. DocKey: bae-91171025-ed21-50e3-b0dc-e31bccdfa1ab", - errResponse.Errors[0].Message, - ) -} diff --git a/api/http/handlerfuncs_index.go b/api/http/handlerfuncs_index.go deleted file mode 100644 index e8d10d900e..0000000000 --- a/api/http/handlerfuncs_index.go +++ /dev/null @@ -1,144 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "net/http" - "strings" - - "github.com/sourcenetwork/defradb/client" -) - -func createIndexHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - var data map[string]string - err = getJSON(req, &data) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - colNameArg := data["collection"] - fieldsArg := data["fields"] - indexNameArg := data["name"] - - col, err := db.GetCollectionByName(req.Context(), colNameArg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - fields := strings.Split(fieldsArg, ",") - fieldDescriptions := make([]client.IndexedFieldDescription, 0, len(fields)) - for _, field := range fields { - fieldDescriptions = append(fieldDescriptions, client.IndexedFieldDescription{Name: field}) - } - indexDesc := client.IndexDescription{ - Name: indexNameArg, - Fields: fieldDescriptions, - } - indexDesc, err = col.CreateIndex(req.Context(), indexDesc) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("index", indexDesc), - http.StatusOK, - ) -} - -func dropIndexHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - var data map[string]string - err = getJSON(req, &data) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusBadRequest) - return - } - - colNameArg := data["collection"] - indexNameArg := data["name"] - - col, err := db.GetCollectionByName(req.Context(), colNameArg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - err = col.DropIndex(req.Context(), indexNameArg) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - sendJSON( - req.Context(), - rw, - simpleDataResponse("result", "success"), - http.StatusOK, - ) -} - -func listIndexHandler(rw http.ResponseWriter, req *http.Request) { - db, err := dbFromContext(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - - queryParams := req.URL.Query() - collectionParam := queryParams.Get("collection") - - if collectionParam == "" { - indexesPerCol, err := db.GetAllIndexes(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - sendJSON( - req.Context(), - rw, - simpleDataResponse("collections", indexesPerCol), - http.StatusOK, - ) - } else { - col, err := db.GetCollectionByName(req.Context(), collectionParam) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - indexes, err := col.GetIndexes(req.Context()) - if err != nil { - handleErr(req.Context(), rw, err, http.StatusInternalServerError) - return - } - sendJSON( - req.Context(), - rw, - simpleDataResponse("indexes", indexes), - http.StatusOK, - ) - } -} diff --git a/api/http/handlerfuncs_index_test.go b/api/http/handlerfuncs_index_test.go deleted file mode 100644 index 
3e82249ef8..0000000000 --- a/api/http/handlerfuncs_index_test.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "bytes" - "context" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/client/mocks" - "github.com/sourcenetwork/defradb/errors" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -func addDBToContext(t *testing.T, req *http.Request, db *mocks.DB) *http.Request { - if db == nil { - db = mocks.NewDB(t) - } - ctx := context.WithValue(req.Context(), ctxDB{}, db) - return req.WithContext(ctx) -} - -func TestCreateIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { - handler := http.HandlerFunc(createIndexHandler) - assert.HTTPBodyContains(t, handler, "POST", IndexPath, nil, "no database available") -} - -func TestCreateIndexHandler_IfFailsToParseParams_ReturnError(t *testing.T) { - req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte("invalid map"))) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, nil) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(createIndexHandler) - - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusBadRequest, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), "invalid character", "handler returned unexpected body") -} - -func TestCreateIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { - testError := errors.New("test error") - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) - - req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(createIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestCreateIndexHandler_IfFailsToCreateIndex_ReturnError(t *testing.T) { - testError := errors.New("test error") - col := mocks.NewCollection(t) - col.EXPECT().CreateIndex(mock.Anything, mock.Anything). 
- Return(client.IndexDescription{}, testError) - - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) - - req, err := http.NewRequest("POST", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(createIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestDropIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { - handler := http.HandlerFunc(dropIndexHandler) - assert.HTTPBodyContains(t, handler, "DELETE", IndexPath, nil, "no database available") -} - -func TestDropIndexHandler_IfFailsToParseParams_ReturnError(t *testing.T) { - req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte("invalid map"))) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, nil) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(dropIndexHandler) - - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusBadRequest, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), "invalid character", "handler returned unexpected body") -} - -func TestDropIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { - testError := errors.New("test error") - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) - - req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(dropIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestDropIndexHandler_IfFailsToDropIndex_ReturnError(t *testing.T) { - testError := errors.New("test error") - col := mocks.NewCollection(t) - col.EXPECT().DropIndex(mock.Anything, mock.Anything).Return(testError) - - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) - - req, err := http.NewRequest("DELETE", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(dropIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestListIndexHandler_IfNoDBInContext_ReturnError(t *testing.T) { - handler := http.HandlerFunc(listIndexHandler) - assert.HTTPBodyContains(t, handler, "GET", IndexPath, nil, "no database available") -} - -func TestListIndexHandler_IfFailsToGetAllIndexes_ReturnError(t *testing.T) { - testError := errors.New("test error") - db := mocks.NewDB(t) - db.EXPECT().GetAllIndexes(mock.Anything).Return(nil, testError) - - req, err := http.NewRequest("GET", IndexPath, bytes.NewBuffer([]byte(`{}`))) - if err != nil { - t.Fatal(err) - } - - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(listIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, 
rr.Body.String(), testError.Error()) -} - -func TestListIndexHandler_IfFailsToGetCollection_ReturnError(t *testing.T) { - testError := errors.New("test error") - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(nil, testError) - - u, _ := url.Parse("http://defradb.com" + IndexPath) - params := url.Values{} - params.Add("collection", "testCollection") - u.RawQuery = params.Encode() - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - t.Fatal(err) - } - - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(listIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} - -func TestListIndexHandler_IfFailsToCollectionGetIndexes_ReturnError(t *testing.T) { - testError := errors.New("test error") - col := mocks.NewCollection(t) - col.EXPECT().GetIndexes(mock.Anything).Return(nil, testError) - - db := mocks.NewDB(t) - db.EXPECT().GetCollectionByName(mock.Anything, mock.Anything).Return(col, nil) - - u, _ := url.Parse("http://defradb.com" + IndexPath) - params := url.Values{} - params.Add("collection", "testCollection") - u.RawQuery = params.Encode() - - req, err := http.NewRequest("GET", u.String(), nil) - if err != nil { - t.Fatal(err) - } - req = addDBToContext(t, req, db) - - rr := httptest.NewRecorder() - handler := http.HandlerFunc(listIndexHandler) - handler.ServeHTTP(rr, req) - - assert.Equal(t, http.StatusInternalServerError, rr.Code, "handler returned wrong status code") - assert.Contains(t, rr.Body.String(), testError.Error()) -} diff --git a/api/http/handlerfuncs_test.go b/api/http/handlerfuncs_test.go deleted file mode 100644 index bef944f908..0000000000 --- a/api/http/handlerfuncs_test.go +++ /dev/null @@ -1,1184 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
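createIndexHandler above reads a flat JSON map with "collection", "fields", and "name" keys, splitting "fields" on commas into one client.IndexedFieldDescription per name. A hypothetical request sketch; the endpoint literal is an assumption standing in for IndexPath:

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

func main() {
	// Keys match what createIndexHandler reads from the decoded map;
	// "fields" is split on commas into the individual indexed fields.
	payload := []byte(`{"collection": "User", "fields": "name,age", "name": "user_name_age_idx"}`)

	// Endpoint literal is an assumption standing in for IndexPath.
	resp, err := http.Post("http://localhost:9181/api/v0/index",
		"application/json", bytes.NewBuffer(payload))
	if err != nil {
		fmt.Println("create index request failed:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // on success the body carries the resulting index description
}
```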
- -package http - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - badger "github.com/dgraph-io/badger/v4" - dshelp "github.com/ipfs/boxo/datastore/dshelp" - "github.com/ipfs/go-cid" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" - badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/db" - "github.com/sourcenetwork/defradb/errors" -) - -type testOptions struct { - Testing *testing.T - DB client.DB - Handlerfunc http.HandlerFunc - Method string - Path string - Body io.Reader - Headers map[string]string - QueryParams map[string]string - ExpectedStatus int - ResponseData any - ServerOptions serverOptions -} - -type testUser struct { - Key string `json:"_key"` - Versions []testVersion `json:"_version"` -} - -type testVersion struct { - CID string `json:"cid"` -} - -func TestRootHandler(t *testing.T) { - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: RootPath, - Body: nil, - ExpectedStatus: 200, - ResponseData: &resp, - }) - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "Welcome to the DefraDB HTTP API. Use /graphql to send queries to the database. Read the documentation at https://docs.source.network/.", v["response"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestPingHandler(t *testing.T) { - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: PingPath, - Body: nil, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "pong", v["response"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestDumpHandlerWithNoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "GET", - Path: DumpPath, - Body: nil, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "ok", v["response"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestDumpHandlerWithDBError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: DumpPath, - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestExecGQLWithNilBody(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: GraphQLPath, - Body: nil, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "body cannot be empty") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - 
require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "body cannot be empty", errResponse.Errors[0].Message) -} - -func TestExecGQLWithEmptyBody(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: GraphQLPath, - Body: bytes.NewBuffer([]byte("")), - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "missing GraphQL request") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "missing GraphQL request", errResponse.Errors[0].Message) -} - -type mockReadCloser struct { - mock.Mock -} - -func (m *mockReadCloser) Read(p []byte) (n int, err error) { - args := m.Called(p) - return args.Int(0), args.Error(1) -} - -func TestExecGQLWithMockBody(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - mockReadCloser := mockReadCloser{} - // if Read is called, it will return error - mockReadCloser.On("Read", mock.AnythingOfType("[]uint8")).Return(0, errors.New("error reading")) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: GraphQLPath, - Body: &mockReadCloser, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "error reading", errResponse.Errors[0].Message) -} - -func TestExecGQLWithInvalidContentType(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - stmt := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf := bytes.NewBuffer([]byte(stmt)) - testRequest(testOptions{ - Testing: t, - Method: "POST", - Path: GraphQLPath, - Body: buf, - ExpectedStatus: 500, - Headers: map[string]string{"Content-Type": contentTypeJSON + "; this-is-wrong"}, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "mime: invalid media parameter") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "mime: invalid media parameter", errResponse.Errors[0].Message) -} - -func TestExecGQLWithNoDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - stmt := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf := bytes.NewBuffer([]byte(stmt)) - testRequest(testOptions{ - Testing: t, - Method: "POST", - Path: GraphQLPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestExecGQLHandlerContentTypeJSONWithJSONError(t *testing.T) { - t.Cleanup(CleanupEnv) - 
env = "dev" - // statement with JSON formatting error - stmt := ` -[ - "query": "mutation { - create_User( - data: \"{ - \\\"age\\\": 31, - \\\"verified\\\": true, - \\\"points\\\": 90, - \\\"name\\\": \\\"Bob\\\" - }\" - ) {_key} - }" -]` - - buf := bytes.NewBuffer([]byte(stmt)) - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeJSON}, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "invalid character") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "unmarshal error: invalid character ':' after array element", errResponse.Errors[0].Message) -} - -func TestExecGQLHandlerContentTypeJSON(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // load schema - testLoadSchema(t, ctx, defra) - - // add document - stmt := ` -{ - "query": "mutation { - create_User( - data: \"{ - \\\"age\\\": 31, - \\\"verified\\\": true, - \\\"points\\\": 90, - \\\"name\\\": \\\"Bob\\\" - }\" - ) {_key} - }" -}` - // remove line returns and tabulation from formatted statement - stmt = strings.ReplaceAll(strings.ReplaceAll(stmt, "\t", ""), "\n", "") - - buf := bytes.NewBuffer([]byte(stmt)) - users := []testUser{} - resp := DataResponse{ - Data: &users, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeJSON}, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - require.Contains(t, users[0].Key, "bae-") -} - -func TestExecGQLHandlerContentTypeJSONWithError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // load schema - testLoadSchema(t, ctx, defra) - - // add document - stmt := ` - { - "query": "mutation { - create_User( - data: \"{ - \\\"age\\\": 31, - \\\"notAField\\\": true - }\" - ) {_key} - }" - }` - - // remove line returns and tabulation from formatted statement - stmt = strings.ReplaceAll(strings.ReplaceAll(stmt, "\t", ""), "\n", "") - - buf := bytes.NewBuffer([]byte(stmt)) - resp := GQLResult{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeJSON}, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - require.Contains(t, resp.Errors, "The given field does not exist. 
Name: notAField")
-	require.Len(t, resp.Errors, 1)
-}
-
-func TestExecGQLHandlerContentTypeJSONWithCharset(t *testing.T) {
-	ctx := context.Background()
-	defra := testNewInMemoryDB(t, ctx)
-	defer defra.Close(ctx)
-
-	// load schema
-	testLoadSchema(t, ctx, defra)
-
-	// add document
-	stmt := `
-{
-	"query": "mutation {
-		create_User(
-			data: \"{
-				\\\"age\\\": 31,
-				\\\"verified\\\": true,
-				\\\"points\\\": 90,
-				\\\"name\\\": \\\"Bob\\\"
-			}\"
-		) {_key}
-	}"
-}`
-	// remove line returns and tabulation from formatted statement
-	stmt = strings.ReplaceAll(strings.ReplaceAll(stmt, "\t", ""), "\n", "")
-
-	buf := bytes.NewBuffer([]byte(stmt))
-	users := []testUser{}
-	resp := DataResponse{
-		Data: &users,
-	}
-	testRequest(testOptions{
-		Testing:        t,
-		DB:             defra,
-		Method:         "POST",
-		Path:           GraphQLPath,
-		Body:           buf,
-		Headers:        map[string]string{"Content-Type": contentTypeJSON + "; charset=utf8"},
-		ExpectedStatus: 200,
-		ResponseData:   &resp,
-	})
-
-	require.Contains(t, users[0].Key, "bae-")
-}
-
-func TestExecGQLHandlerContentTypeFormURLEncoded(t *testing.T) {
-	t.Cleanup(CleanupEnv)
-	env = "dev"
-	errResponse := ErrorResponse{}
-	testRequest(testOptions{
-		Testing:        t,
-		DB:             nil,
-		Method:         "POST",
-		Path:           GraphQLPath,
-		Body:           nil,
-		Headers:        map[string]string{"Content-Type": contentTypeFormURLEncoded},
-		ExpectedStatus: 400,
-		ResponseData:   &errResponse,
-	})
-
-	require.Contains(t, errResponse.Errors[0].Extensions.Stack, "content type application/x-www-form-urlencoded not yet supported")
-	require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status)
-	require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError)
-	require.Equal(t, "content type application/x-www-form-urlencoded not yet supported", errResponse.Errors[0].Message)
-}
-
-func TestExecGQLHandlerContentTypeGraphQL(t *testing.T) {
-	ctx := context.Background()
-	defra := testNewInMemoryDB(t, ctx)
-	defer defra.Close(ctx)
-
-	// load schema
-	testLoadSchema(t, ctx, defra)
-
-	// add document
-	stmt := `
-mutation {
-	create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") {
-		_key
-	}
-}`
-
-	buf := bytes.NewBuffer([]byte(stmt))
-	users := []testUser{}
-	resp := DataResponse{
-		Data: &users,
-	}
-	testRequest(testOptions{
-		Testing:        t,
-		DB:             defra,
-		Method:         "POST",
-		Path:           GraphQLPath,
-		Body:           buf,
-		Headers:        map[string]string{"Content-Type": contentTypeGraphQL},
-		ExpectedStatus: 200,
-		ResponseData:   &resp,
-	})
-
-	require.Contains(t, users[0].Key, "bae-")
-}
-
-func TestExecGQLHandlerContentTypeText(t *testing.T) {
-	ctx := context.Background()
-	defra := testNewInMemoryDB(t, ctx)
-	defer defra.Close(ctx)
-
-	// load schema
-	testLoadSchema(t, ctx, defra)
-
-	// add document
-	stmt := `
-mutation {
-	create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") {
-		_key
-	}
-}`
-
-	buf := bytes.NewBuffer([]byte(stmt))
-	users := []testUser{}
-	resp := DataResponse{
-		Data: &users,
-	}
-	testRequest(testOptions{
-		Testing:        t,
-		DB:             defra,
-		Method:         "POST",
-		Path:           GraphQLPath,
-		Body:           buf,
-		ExpectedStatus: 200,
-		ResponseData:   &resp,
-	})
-
-	require.Contains(t, users[0].Key, "bae-")
-}
-
-func TestExecGQLHandlerWithSubscription(t *testing.T) {
-	ctx := context.Background()
-	defra := testNewInMemoryDB(t, ctx)
-	defer defra.Close(ctx)
-
-	// load schema
-	testLoadSchema(t, ctx, defra)
-
-	stmt := `
-subscription {
-	User {
-		_key
-		age
-		name
-	}
-}`
-
-	buf := bytes.NewBuffer([]byte(stmt))
-
-	ch := make(chan []byte)
-	errCh := make(chan error)
make(chan error) - - // We need to set a timeout, otherwise the testSubscriptionRequest function will block until the - // http.ServeHTTP call returns, which in this case will only happen with a timeout. - ctxTimeout, cancel := context.WithTimeout(ctx, time.Second) - defer cancel() - - go testSubscriptionRequest(ctxTimeout, testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - Headers: map[string]string{"Content-Type": contentTypeGraphQL}, - ExpectedStatus: 200, - }, ch, errCh) - - // We wait to ensure the subscription request can subscribe to the event channel. - time.Sleep(time.Second / 2) - - // add document - stmt2 := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf2 := bytes.NewBuffer([]byte(stmt2)) - users := []testUser{} - resp := DataResponse{ - Data: &users, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf2, - ExpectedStatus: 200, - ResponseData: &resp, - }) - select { - case data := <-ch: - require.Contains(t, string(data), users[0].Key) - case err := <-errCh: - t.Fatal(err) - } -} - -func TestListSchemaHandlerWithoutDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: SchemaPath, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - assert.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - assert.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - assert.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - assert.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestListSchemaHandlerWithNoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - stmt := ` -type user { - name: String - age: Int - verified: Boolean - points: Float -} -type group { - owner: user - members: [user] -}` - - _, err := defra.AddSchema(ctx, stmt) - if err != nil { - t.Fatal(err) - } - - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "GET", - Path: SchemaPath, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - assert.Equal(t, map[string]any{ - "collections": []any{ - map[string]any{ - "name": "group", - "id": "bafkreieunyhcyupkdppyo2g4zcqtdxvj5xi4f422gp2jwene6ohndvcobe", - "version_id": "bafkreieunyhcyupkdppyo2g4zcqtdxvj5xi4f422gp2jwene6ohndvcobe", - "fields": []any{ - map[string]any{ - "id": "0", - "kind": "ID", - "name": "_key", - "internal": true, - }, - map[string]any{ - "id": "1", - "kind": "[user]", - "name": "members", - "internal": false, - }, - map[string]any{ - "id": "2", - "kind": "user", - "name": "owner", - "internal": false, - }, - map[string]any{ - "id": "3", - "kind": "ID", - "name": "owner_id", - "internal": true, - }, - }, - }, - map[string]any{ - "name": "user", - "id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", - "version_id": "bafkreigrucdl7x3lsa4xwgz2bn7lbqmiwkifnspgx7hlkpaal3o55325bq", - "fields": []any{ - map[string]any{ - "id": "0", - "kind": "ID", - "name": "_key", - "internal": true, - }, - map[string]any{ - "id": "1", - "kind": "Int", - "name": "age", - "internal": false, - }, - map[string]any{ - "id": "2", - "kind": "String", - "name": "name", - "internal": false, - }, - map[string]any{ - 
"id": "3", - "kind": "Float", - "name": "points", - "internal": false, - }, - map[string]any{ - "id": "4", - "kind": "Boolean", - "name": "verified", - "internal": false, - }, - }, - }, - }, - }, v) - - default: - t.Fatalf("data should be of type map[string]any but got %T\n%v", resp.Data, v) - } -} - -func TestLoadSchemaHandlerWithReadBodyError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - mockReadCloser := mockReadCloser{} - // if Read is called, it will return error - mockReadCloser.On("Read", mock.AnythingOfType("[]uint8")).Return(0, errors.New("error reading")) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: SchemaPath, - Body: &mockReadCloser, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "error reading") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "error reading", errResponse.Errors[0].Message) -} - -func TestLoadSchemaHandlerWithoutDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - stmt := ` -type User { - name: String - age: Int - verified: Boolean - points: Float -}` - - buf := bytes.NewBuffer([]byte(stmt)) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "POST", - Path: SchemaPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestLoadSchemaHandlerWithAddSchemaError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - // statement with types instead of type - stmt := ` -types User { - name: String - age: Int - verified: Boolean - points: Float -}` - - buf := bytes.NewBuffer([]byte(stmt)) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: SchemaPath, - Body: buf, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "Syntax Error GraphQL (2:1) Unexpected Name") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal( - t, - "Syntax Error GraphQL (2:1) Unexpected Name \"types\"\n\n1: \n2: types User {\n ^\n3: \\u0009name: String\n", - errResponse.Errors[0].Message, - ) -} - -func TestLoadSchemaHandlerWitNoError(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - stmt := ` -type User { - name: String - age: Int - verified: Boolean - points: Float -}` - - buf := bytes.NewBuffer([]byte(stmt)) - - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: SchemaPath, - Body: buf, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, map[string]any{ - "result": "success", - "collections": []any{ - map[string]any{ - "name": 
"User", - "id": "bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske", - "version_id": "bafkreibpnvkvjqvg4skzlijka5xe63zeu74ivcjwd76q7yi65jdhwqhske", - }, - }, - }, v) - - default: - t.Fatalf("data should be of type map[string]any but got %T\n%v", resp.Data, v) - } -} - -func TestGetBlockHandlerWithMultihashError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: BlocksPath + "/1234", - Body: nil, - ExpectedStatus: 400, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "illegal base32 data at input byte 0") - require.Equal(t, http.StatusBadRequest, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Bad Request", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "illegal base32 data at input byte 0", errResponse.Errors[0].Message) -} - -func TestGetBlockHandlerWithDSKeyWithNoDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - cID, err := cid.Parse("bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm") - if err != nil { - t.Fatal(err) - } - dsKey := dshelp.MultihashToDsKey(cID.Hash()) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: BlocksPath + dsKey.String(), - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestGetBlockHandlerWithNoDB(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: BlocksPath + "/bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no database available") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no database available", errResponse.Errors[0].Message) -} - -func TestGetBlockHandlerWithGetBlockstoreError(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "GET", - Path: BlocksPath + "/bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", - Body: nil, - ExpectedStatus: 500, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm") - require.Equal(t, http.StatusInternalServerError, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Internal Server Error", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "ipld: could not find bafybeidembipteezluioakc2zyke4h5fnj4rr3uaougfyxd35u3qzefzhm", errResponse.Errors[0].Message) -} - -func TestGetBlockHandlerWithValidBlockstore(t *testing.T) { - ctx := context.Background() - defra := testNewInMemoryDB(t, ctx) - defer defra.Close(ctx) - - 
testLoadSchema(t, ctx, defra) - - // add document - stmt := ` -mutation { - create_User(data: "{\"age\": 31, \"verified\": true, \"points\": 90, \"name\": \"Bob\"}") { - _key - } -}` - - buf := bytes.NewBuffer([]byte(stmt)) - - users := []testUser{} - resp := DataResponse{ - Data: &users, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf, - ExpectedStatus: 200, - ResponseData: &resp, - }) - - if !strings.Contains(users[0].Key, "bae-") { - t.Fatal("expected valid document key") - } - - // get document cid - stmt2 := ` -query { - User (dockey: "%s") { - _version { - cid - } - } -}` - buf2 := bytes.NewBuffer([]byte(fmt.Sprintf(stmt2, users[0].Key))) - - users2 := []testUser{} - resp2 := DataResponse{ - Data: &users2, - } - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "POST", - Path: GraphQLPath, - Body: buf2, - ExpectedStatus: 200, - ResponseData: &resp2, - }) - - _, err := cid.Decode(users2[0].Versions[0].CID) - if err != nil { - t.Fatal(err) - } - - resp3 := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: defra, - Method: "GET", - Path: BlocksPath + "/" + users2[0].Versions[0].CID, - Body: buf, - ExpectedStatus: 200, - ResponseData: &resp3, - }) - - switch d := resp3.Data.(type) { - case map[string]any: - switch val := d["val"].(type) { - case string: - require.Equal(t, "pGNhZ2UYH2RuYW1lY0JvYmZwb2ludHMYWmh2ZXJpZmllZPU=", val) - default: - t.Fatalf("expecting string but got %T", val) - } - default: - t.Fatalf("expecting map[string]any but got %T", d) - } -} - -func TestPeerIDHandler(t *testing.T) { - resp := DataResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: PeerIDPath, - Body: nil, - ExpectedStatus: 200, - ResponseData: &resp, - ServerOptions: serverOptions{ - peerID: "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", - }, - }) - - switch v := resp.Data.(type) { - case map[string]any: - require.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", v["peerID"]) - default: - t.Fatalf("data should be of type map[string]any but got %T", resp.Data) - } -} - -func TestPeerIDHandlerWithNoPeerIDInContext(t *testing.T) { - t.Cleanup(CleanupEnv) - env = "dev" - - errResponse := ErrorResponse{} - testRequest(testOptions{ - Testing: t, - DB: nil, - Method: "GET", - Path: PeerIDPath, - Body: nil, - ExpectedStatus: 404, - ResponseData: &errResponse, - }) - - require.Contains(t, errResponse.Errors[0].Extensions.Stack, "no PeerID available. P2P might be disabled") - require.Equal(t, http.StatusNotFound, errResponse.Errors[0].Extensions.Status) - require.Equal(t, "Not Found", errResponse.Errors[0].Extensions.HTTPError) - require.Equal(t, "no PeerID available. 
P2P might be disabled", errResponse.Errors[0].Message) -} - -func testRequest(opt testOptions) []byte { - req, err := http.NewRequest(opt.Method, opt.Path, opt.Body) - if err != nil { - opt.Testing.Fatal(err) - } - - for k, v := range opt.Headers { - req.Header.Set(k, v) - } - - q := req.URL.Query() - for k, v := range opt.QueryParams { - q.Add(k, v) - } - req.URL.RawQuery = q.Encode() - - h := newHandler(opt.DB, opt.ServerOptions) - rec := httptest.NewRecorder() - h.ServeHTTP(rec, req) - assert.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) - - resBody, err := io.ReadAll(rec.Result().Body) - if err != nil { - opt.Testing.Fatal(err) - } - - if opt.ResponseData != nil { - err = json.Unmarshal(resBody, &opt.ResponseData) - if err != nil { - opt.Testing.Fatal(err) - } - } - - return resBody -} - -func testSubscriptionRequest(ctx context.Context, opt testOptions, ch chan []byte, errCh chan error) { - req, err := http.NewRequest(opt.Method, opt.Path, opt.Body) - if err != nil { - errCh <- err - return - } - - req = req.WithContext(ctx) - - for k, v := range opt.Headers { - req.Header.Set(k, v) - } - - h := newHandler(opt.DB, opt.ServerOptions) - rec := httptest.NewRecorder() - h.ServeHTTP(rec, req) - require.Equal(opt.Testing, opt.ExpectedStatus, rec.Result().StatusCode) - - respBody, err := io.ReadAll(rec.Result().Body) - if err != nil { - errCh <- err - return - } - - ch <- respBody -} - -func testNewInMemoryDB(t *testing.T, ctx context.Context) client.DB { - // init in memory DB - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} - rootstore, err := badgerds.NewDatastore("", &opts) - if err != nil { - t.Fatal(err) - } - - options := []db.Option{ - db.WithUpdateEvents(), - } - - defra, err := db.NewDB(ctx, rootstore, options...) - if err != nil { - t.Fatal(err) - } - - return defra -} - -func testLoadSchema(t *testing.T, ctx context.Context, db client.DB) { - stmt := ` -type User { - name: String - age: Int - verified: Boolean - points: Float -}` - _, err := db.AddSchema(ctx, stmt) - if err != nil { - t.Fatal(err) - } -} diff --git a/api/http/logger.go b/api/http/logger.go deleted file mode 100644 index 2a91a271c2..0000000000 --- a/api/http/logger.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package http - -import ( - "net/http" - "time" - - "github.com/sourcenetwork/defradb/logging" -) - -type loggingResponseWriter struct { - statusCode int - contentLength int - - http.ResponseWriter -} - -func newLoggingResponseWriter(w http.ResponseWriter) *loggingResponseWriter { - return &loggingResponseWriter{ - statusCode: http.StatusOK, - contentLength: 0, - ResponseWriter: w, - } -} - -func (lrw *loggingResponseWriter) Flush() { - lrw.ResponseWriter.(http.Flusher).Flush() -} - -func (lrw *loggingResponseWriter) Header() http.Header { - return lrw.ResponseWriter.Header() -} - -func (lrw *loggingResponseWriter) WriteHeader(code int) { - lrw.statusCode = code - lrw.ResponseWriter.WriteHeader(code) -} - -func (lrw *loggingResponseWriter) Write(b []byte) (int, error) { - lrw.contentLength = len(b) - return lrw.ResponseWriter.Write(b) -} - -func loggerMiddleware(next http.Handler) http.Handler { - return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - start := time.Now() - lrw := newLoggingResponseWriter(rw) - next.ServeHTTP(lrw, req) - elapsed := time.Since(start) - log.Info( - req.Context(), - "Request", - logging.NewKV( - "Method", - req.Method, - ), - logging.NewKV( - "Path", - req.URL.Path, - ), - logging.NewKV( - "Status", - lrw.statusCode, - ), - logging.NewKV( - "LengthBytes", - lrw.contentLength, - ), - logging.NewKV( - "ElapsedTime", - elapsed.String(), - ), - ) - }) -} diff --git a/api/http/logger_test.go b/api/http/logger_test.go deleted file mode 100644 index 9c2791d9df..0000000000 --- a/api/http/logger_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "bufio" - "encoding/json" - "net/http" - "net/http/httptest" - "os" - "path" - "strconv" - "testing" - - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/logging" -) - -func TestNewLoggingResponseWriterLogger(t *testing.T) { - rec := httptest.NewRecorder() - lrw := newLoggingResponseWriter(rec) - - lrw.WriteHeader(400) - assert.Equal(t, 400, lrw.statusCode) - - content := "Hello world!" - - length, err := lrw.Write([]byte(content)) - if err != nil { - t.Fatal(err) - } - assert.Equal(t, length, lrw.contentLength) - assert.Equal(t, rec.Body.String(), content) -} - -func TestLoggingResponseWriterWriteWithChunks(t *testing.T) { - rec := httptest.NewRecorder() - lrw := newLoggingResponseWriter(rec) - - content := "Hello world!" 
- contentLength := len(content) - - lrw.Header().Set("Content-Length", strconv.Itoa(contentLength)) - - length1, err := lrw.Write([]byte(content[:contentLength/2])) - if err != nil { - t.Fatal(err) - } - - length2, err := lrw.Write([]byte(content[contentLength/2:])) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, contentLength, length1+length2) - assert.Equal(t, rec.Body.String(), content) -} - -func TestLoggerKeyValueOutput(t *testing.T) { - dir := t.TempDir() - - // send logs to temp file so we can inspect it - logFile := path.Join(dir, "http_test.log") - - req, err := http.NewRequest("GET", "/ping", nil) - if err != nil { - t.Fatal(err) - } - - rec2 := httptest.NewRecorder() - - log.ApplyConfig(logging.Config{ - EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), - OutputPaths: []string{logFile}, - }) - loggerMiddleware(http.HandlerFunc(pingHandler)).ServeHTTP(rec2, req) - assert.Equal(t, 200, rec2.Result().StatusCode) - - // inspect the log file - kv, err := readLog(logFile) - if err != nil { - t.Fatal(err) - } - - // check that everything is as expected - assert.Equal(t, "{\"data\":{\"response\":\"pong\"}}", rec2.Body.String()) - assert.Equal(t, "INFO", kv["level"]) - assert.Equal(t, "http", kv["logger"]) - assert.Equal(t, "Request", kv["msg"]) - assert.Equal(t, "GET", kv["Method"]) - assert.Equal(t, "/ping", kv["Path"]) - assert.Equal(t, float64(200), kv["Status"]) - assert.Equal(t, float64(28), kv["LengthBytes"]) -} - -func readLog(path string) (map[string]any, error) { - // inspect the log file - f, err := os.Open(path) - if err != nil { - return nil, errors.WithStack(err) - } - - scanner := bufio.NewScanner(f) - scanner.Scan() - logLine := scanner.Text() - - kv := make(map[string]any) - err = json.Unmarshal([]byte(logLine), &kv) - if err != nil { - return nil, errors.WithStack(err) - } - - return kv, nil -} diff --git a/api/http/request_result.go b/api/http/request_result.go deleted file mode 100644 index f5bf7912e9..0000000000 --- a/api/http/request_result.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import "github.com/sourcenetwork/defradb/client" - -type GQLResult struct { - Errors []string `json:"errors,omitempty"` - - Data any `json:"data"` -} - -func newGQLResult(r client.GQLResult) *GQLResult { - errors := make([]string, len(r.Errors)) - for i := range r.Errors { - errors[i] = r.Errors[i].Error() - } - - return &GQLResult{ - Errors: errors, - Data: r.Data, - } -} diff --git a/api/http/router.go b/api/http/router.go deleted file mode 100644 index 2d54a16560..0000000000 --- a/api/http/router.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package http - -import ( - "net/http" - "net/url" - "path" - "strings" - - "github.com/pkg/errors" -) - -const ( - // Version is the current version of the HTTP API. - Version string = "v0" - versionedAPIPath string = "/api/" + Version - - RootPath string = versionedAPIPath + "" - PingPath string = versionedAPIPath + "/ping" - DumpPath string = versionedAPIPath + "/debug/dump" - BlocksPath string = versionedAPIPath + "/blocks" - GraphQLPath string = versionedAPIPath + "/graphql" - SchemaPath string = versionedAPIPath + "/schema" - SchemaMigrationPath string = SchemaPath + "/migration" - IndexPath string = versionedAPIPath + "/index" - PeerIDPath string = versionedAPIPath + "/peerid" - BackupPath string = versionedAPIPath + "/backup" - ExportPath string = BackupPath + "/export" - ImportPath string = BackupPath + "/import" -) - -// playgroundHandler is set when building with the playground build tag -var playgroundHandler http.Handler - -func setRoutes(h *handler) *handler { - h.Get(RootPath, rootHandler) - h.Get(PingPath, pingHandler) - h.Get(DumpPath, dumpHandler) - h.Get(BlocksPath+"/{cid}", getBlockHandler) - h.Get(GraphQLPath, execGQLHandler) - h.Post(GraphQLPath, execGQLHandler) - h.Get(SchemaPath, listSchemaHandler) - h.Post(SchemaPath, loadSchemaHandler) - h.Patch(SchemaPath, patchSchemaHandler) - h.Post(SchemaMigrationPath, setMigrationHandler) - h.Get(SchemaMigrationPath, getMigrationHandler) - h.Post(IndexPath, createIndexHandler) - h.Delete(IndexPath, dropIndexHandler) - h.Get(IndexPath, listIndexHandler) - h.Get(PeerIDPath, peerIDHandler) - h.Post(ExportPath, exportHandler) - h.Post(ImportPath, importHandler) - h.Handle("/*", playgroundHandler) - - return h -} - -// JoinPaths takes a base path and any number of additional paths -// and combines them safely to form a full URL path. -// The base must start with http or https. -func JoinPaths(base string, paths ...string) (*url.URL, error) { - if !strings.HasPrefix(base, "http") { - return nil, ErrSchema - } - - u, err := url.Parse(base) - if err != nil { - return nil, errors.WithStack(err) - } - - u.Path = path.Join(u.Path, strings.Join(paths, "/")) - - return u, nil -} diff --git a/api/http/router_test.go b/api/http/router_test.go deleted file mode 100644 index e43260ef43..0000000000 --- a/api/http/router_test.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package http - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestJoinPathsWithBase(t *testing.T) { - path, err := JoinPaths("http://localhost:9181", BlocksPath, "cid_of_some_sort") - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "http://localhost:9181"+BlocksPath+"/cid_of_some_sort", path.String()) -} - -func TestJoinPathsWithNoBase(t *testing.T) { - _, err := JoinPaths("", BlocksPath, "cid_of_some_sort") - assert.ErrorIs(t, ErrSchema, err) -} - -func TestJoinPathsWithBaseWithoutHttpPrefix(t *testing.T) { - _, err := JoinPaths("localhost:9181", BlocksPath, "cid_of_some_sort") - assert.ErrorIs(t, ErrSchema, err) -} - -func TestJoinPathsWithNoPaths(t *testing.T) { - path, err := JoinPaths("http://localhost:9181") - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, "http://localhost:9181", path.String()) -} - -func TestJoinPathsWithInvalidCharacter(t *testing.T) { - _, err := JoinPaths("https://%gh&%ij") - assert.Error(t, err) -} diff --git a/api/http/server.go b/api/http/server.go deleted file mode 100644 index a71dccb0ec..0000000000 --- a/api/http/server.go +++ /dev/null @@ -1,322 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package http - -import ( - "context" - "crypto/tls" - "fmt" - "net" - "net/http" - "path" - "strings" - - "github.com/sourcenetwork/immutable" - "golang.org/x/crypto/acme/autocert" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" -) - -const ( - // These constants are best-effort durations that fit our current API - // and possibly prevent us from running out of file descriptors. - // readTimeout = 5 * time.Second - // writeTimeout = 10 * time.Second - // idleTimeout = 120 * time.Second - - // Temporarily disabling timeouts until [this proposal](https://github.com/golang/go/issues/54136) is merged. - // https://github.com/sourcenetwork/defradb/issues/927 - readTimeout = 0 - writeTimeout = 0 - idleTimeout = 0 -) - -const ( - httpPort = ":80" - httpsPort = ":443" -) - -// Server struct holds the Handler for the HTTP API. -type Server struct { - options serverOptions - listener net.Listener - certManager *autocert.Manager - // address that is assigned to the server on listen - address string - - http.Server -} - -type serverOptions struct { - // list of allowed origins for CORS. - allowedOrigins []string - // ID of the server node. - peerID string - // when the value is present, the server will run with tls - tls immutable.Option[tlsOptions] - // root directory for the node config. - rootDir string - // The domain for the API (optional). - domain immutable.Option[string] -} - -type tlsOptions struct { - // Public key for TLS. Ignored if domain is set. - pubKey string - // Private key for TLS. Ignored if domain is set. - privKey string - // email address for the CA to send problem notifications (optional) - email string - // specify the tls port - port string -} - -// NewServer instantiates a new server with the given http.Handler. 
-func NewServer(db client.DB, options ...func(*Server)) *Server { - srv := &Server{ - Server: http.Server{ - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - IdleTimeout: idleTimeout, - }, - } - - for _, opt := range append(options, DefaultOpts()) { - opt(srv) - } - - srv.Handler = newHandler(db, srv.options) - - return srv -} - -func newHTTPRedirServer(m *autocert.Manager) *Server { - srv := &Server{ - Server: http.Server{ - ReadTimeout: readTimeout, - WriteTimeout: writeTimeout, - IdleTimeout: idleTimeout, - }, - } - - srv.Addr = httpPort - srv.Handler = m.HTTPHandler(nil) - - return srv -} - -// DefaultOpts returns the default options for the server. -func DefaultOpts() func(*Server) { - return func(s *Server) { - if s.Addr == "" { - s.Addr = "localhost:9181" - } - } -} - -// WithAllowedOrigins returns an option to set the allowed origins for CORS. -func WithAllowedOrigins(origins ...string) func(*Server) { - return func(s *Server) { - s.options.allowedOrigins = append(s.options.allowedOrigins, origins...) - } -} - -// WithAddress returns an option to set the address for the server. -func WithAddress(addr string) func(*Server) { - return func(s *Server) { - s.Addr = addr - - // If the address is not localhost, we check to see if it's a valid IP address. - // If it's not a valid IP, we assume that it's a domain name to be used with TLS. - if !strings.HasPrefix(addr, "localhost:") && !strings.HasPrefix(addr, ":") { - host, _, err := net.SplitHostPort(addr) - if err != nil { - host = addr - } - ip := net.ParseIP(host) - if ip == nil { - s.Addr = httpPort - s.options.domain = immutable.Some(host) - } - } - } -} - -// WithCAEmail returns an option to set the email address for the CA to send problem notifications. -func WithCAEmail(email string) func(*Server) { - return func(s *Server) { - tlsOpt := s.options.tls.Value() - tlsOpt.email = email - s.options.tls = immutable.Some(tlsOpt) - } -} - -// WithPeerID returns an option to set the identifier of the server node. -func WithPeerID(id string) func(*Server) { - return func(s *Server) { - s.options.peerID = id - } -} - -// WithRootDir returns an option to set the root directory for the node config. -func WithRootDir(rootDir string) func(*Server) { - return func(s *Server) { - s.options.rootDir = rootDir - } -} - -// WithSelfSignedCert returns an option to set the public and private keys for TLS. -func WithSelfSignedCert(pubKey, privKey string) func(*Server) { - return func(s *Server) { - tlsOpt := s.options.tls.Value() - tlsOpt.pubKey = pubKey - tlsOpt.privKey = privKey - s.options.tls = immutable.Some(tlsOpt) - } -} - -// WithTLS returns an option to enable TLS. -func WithTLS() func(*Server) { - return func(s *Server) { - tlsOpt := s.options.tls.Value() - tlsOpt.port = httpsPort - s.options.tls = immutable.Some(tlsOpt) - } -} - -// WithTLSPort returns an option to set the port for TLS. -func WithTLSPort(port int) func(*Server) { - return func(s *Server) { - tlsOpt := s.options.tls.Value() - tlsOpt.port = fmt.Sprintf(":%d", port) - s.options.tls = immutable.Some(tlsOpt) - } -} - -// Listen creates a new net.Listener and saves it on the receiver. -func (s *Server) Listen(ctx context.Context) error { - var err error - if s.options.tls.HasValue() { - return s.listenWithTLS(ctx) - } - - lc := net.ListenConfig{} - s.listener, err = lc.Listen(ctx, "tcp", s.Addr) - if err != nil { - return errors.WithStack(err) - } - - // Save the address on the server in case the port was set to random, - // so that we can see what was assigned. 
- s.address = s.listener.Addr().String() - - return nil -} - -func (s *Server) listenWithTLS(ctx context.Context) error { - cfg := &tls.Config{ - MinVersion: tls.VersionTLS12, - // We only allow cipher suites that are marked secure - // by ssllabs - CipherSuites: []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, - }, - ServerName: "DefraDB", - } - - if s.options.domain.HasValue() && s.options.domain.Value() != "" { - s.Addr = s.options.tls.Value().port - - if s.options.tls.Value().email == "" || s.options.tls.Value().email == config.DefaultAPIEmail { - return ErrNoEmail - } - - certCache := path.Join(s.options.rootDir, "autocerts") - - log.FeedbackInfo( - ctx, - "Generating auto certificate", - logging.NewKV("Domain", s.options.domain.Value()), - logging.NewKV("Certificate cache", certCache), - ) - - m := &autocert.Manager{ - Cache: autocert.DirCache(certCache), - Prompt: autocert.AcceptTOS, - Email: s.options.tls.Value().email, - HostPolicy: autocert.HostWhitelist(s.options.domain.Value()), - } - - cfg.GetCertificate = m.GetCertificate - - // We set manager on the server instance to later start - // a redirection server. - s.certManager = m - } else { - // When not using auto cert, we create a self-signed certificate - // with the provided public and private keys. - log.FeedbackInfo(ctx, "Generating self signed certificate") - - cert, err := tls.LoadX509KeyPair( - s.options.tls.Value().privKey, - s.options.tls.Value().pubKey, - ) - if err != nil { - return errors.WithStack(err) - } - - cfg.Certificates = []tls.Certificate{cert} - } - - var err error - s.listener, err = tls.Listen("tcp", s.Addr, cfg) - if err != nil { - return errors.WithStack(err) - } - - // Save the address on the server in case the port was set to random, - // so that we can see what was assigned. - s.address = s.listener.Addr().String() - - return nil -} - -// Run calls Serve with the receiver's listener. -func (s *Server) Run(ctx context.Context) error { - if s.listener == nil { - return ErrNoListener - } - - if s.certManager != nil { - // When using TLS, it's important to redirect http requests to https - go func() { - srv := newHTTPRedirServer(s.certManager) - err := srv.ListenAndServe() - if err != nil && !errors.Is(err, http.ErrServerClosed) { - log.Info(ctx, "Something went wrong with the redirection server", logging.NewKV("Error", err)) - } - }() - } - return s.Serve(s.listener) -} - -// AssignedAddr returns the address that was assigned to the server on calls to listen. 
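For reference, the functional options above compose directly on NewServer. A minimal sketch of a caller, assuming this package is imported as httpapi (the alias the CLI uses elsewhere in this diff) and using illustrative address and certificate paths:

    // Sketch only: wiring the deleted api/http server with self-signed TLS.
    // NewServer, WithAddress, WithSelfSignedCert, WithTLS, Listen, Run and
    // AssignedAddr are the APIs shown above; runAPI and the literal values
    // are hypothetical.
    func runAPI(ctx context.Context, db client.DB) error {
    	srv := httpapi.NewServer(db,
    		httpapi.WithAddress("localhost:9181"),
    		httpapi.WithSelfSignedCert("server.crt", "server.key"), // public key, then private key
    		httpapi.WithTLS(),
    	)
    	if err := srv.Listen(ctx); err != nil {
    		return err
    	}
    	// AssignedAddr is only meaningful after Listen, e.g. when port 0 was requested.
    	fmt.Println("serving on", srv.AssignedAddr())
    	return srv.Run(ctx)
    }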
-func (s *Server) AssignedAddr() string { - return s.address -} diff --git a/cli/backup_export.go b/cli/backup_export.go index 32184bfe35..9e8d1c056e 100644 --- a/cli/backup_export.go +++ b/cli/backup_export.go @@ -11,24 +11,16 @@ package cli import ( - "bytes" - "encoding/json" - "io" - "net/http" - "os" "strings" "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" ) const jsonFileType = "json" -func MakeBackupExportCommand(cfg *config.Config) *cobra.Command { +func MakeBackupExportCommand() *cobra.Command { var collections []string var pretty bool var format string @@ -44,21 +36,14 @@ If the --pretty flag is provided, the JSON will be pretty printed. Example: export data for the 'Users' collection: defradb client export --collection Users user_data.json`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(1)(cmd, args); err != nil { - return NewErrInvalidArgumentLength(err, 1) - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) (err error) { + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + if !isValidExportFormat(format) { return ErrInvalidExportFormat } outputPath := args[0] - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.ExportPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } for i := range collections { collections[i] = strings.Trim(collections[i], " ") @@ -71,57 +56,7 @@ Example: export data for the 'Users' collection: Collections: collections, } - b, err := json.Marshal(data) - if err != nil { - return err - } - - res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(b)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return err - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - type exportResponse struct { - Errors []struct { - Message string `json:"message"` - } `json:"errors"` - } - r := exportResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to export data", - logging.NewKV("Errors", r.Errors)) - } else if len(collections) == 1 { - log.FeedbackInfo(cmd.Context(), "Data exported for collection "+collections[0]) - } else if len(collections) > 1 { - log.FeedbackInfo(cmd.Context(), "Data exported for collections "+strings.Join(collections, ", ")) - } else { - log.FeedbackInfo(cmd.Context(), "Data exported for all collections") - } - } - return nil + return store.BasicExport(cmd.Context(), &data) }, } cmd.Flags().BoolVarP(&pretty, "pretty", "p", false, "Set the output JSON to be pretty printed") diff --git a/cli/backup_export_test.go b/cli/backup_export_test.go deleted file mode 100644 index 9539a1cdb1..0000000000 --- a/cli/backup_export_test.go +++ /dev/null @@ -1,300 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - "encoding/json" - "os" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" -) - -func TestBackupExportCmd_WithNoArgument_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - - dbExportCmd := MakeBackupExportCommand(cfg) - err := dbExportCmd.ValidateArgs([]string{}) - require.ErrorIs(t, err, ErrInvalidArgumentLength) -} - -func TestBackupExportCmd_WithInvalidExportFormat_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - dbExportCmd := MakeBackupExportCommand(cfg) - - filepath := t.TempDir() + "/test.json" - - dbExportCmd.Flags().Set("format", "invalid") - err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.ErrorIs(t, err, ErrInvalidExportFormat) -} - -func TestBackupExportCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - - filepath := t.TempDir() + "/test.json" - - dbExportCmd := MakeBackupExportCommand(cfg) - err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestBackupExportCmd_WithEmptyDatastore_NoError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - - require.Len(t, b, 2) // file should be an empty json object -} - -func TestBackupExportCmd_WithInvalidCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - dbExportCmd.Flags().Set("collections", "User") - err := dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Failed to export data")) -} - -func TestBackupExportCmd_WithAllCollection_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) - require.NoError(t, err) - - col, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - - 
require.Equal( - t, - `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, - string(b), - ) -} - -func TestBackupExportCmd_WithAllCollectionAndPrettyFormatting_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) - require.NoError(t, err) - - col, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - dbExportCmd.Flags().Set("pretty", "true") - err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for all collections")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{ - "User": [ - { - "_key": "bae-e933420a-988a-56f8-8952-6c245aebd519", - "_newKey": "bae-e933420a-988a-56f8-8952-6c245aebd519", - "age": 30, - "name": "John" - } - ] -}`, - string(b), - ) -} - -func TestBackupExportCmd_WithSingleCollection_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) - require.NoError(t, err) - - col, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - dbExportCmd.Flags().Set("collections", "User") - err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for collection User")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - - require.Equal( - t, - `{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`, - string(b), - ) -} - -func TestBackupExportCmd_WithMultipleCollections_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - } - - type Address { - street: String - city: String - }`) - require.NoError(t, err) - - doc1, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) - require.NoError(t, err) - - col1, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - err = col1.Create(ctx, doc1) - require.NoError(t, err) - - doc2, err := client.NewDocFromJSON([]byte(`{"street": "101 Maple St", "city": "Toronto"}`)) - require.NoError(t, err) - - col2, err := di.db.GetCollectionByName(ctx, "Address") - require.NoError(t, err) - - err = col2.Create(ctx, doc2) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := 
simulateConsoleOutput(t) - defer revertOutput() - - dbExportCmd := MakeBackupExportCommand(cfg) - dbExportCmd.Flags().Set("collections", "User, Address") - err = dbExportCmd.RunE(dbExportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Data exported for collections User, Address")) - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - fileMap := map[string]any{} - err = json.Unmarshal(b, &fileMap) - require.NoError(t, err) - - expectedMap := map[string]any{} - data := []byte(`{"Address":[{"_key":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","_newKey":"bae-8096f2c1-ea4c-5226-8ba5-17fc4b68ac1f","city":"Toronto","street":"101 Maple St"}],"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`) - err = json.Unmarshal(data, &expectedMap) - require.NoError(t, err) - - require.EqualValues(t, expectedMap, fileMap) -} diff --git a/cli/backup_import.go b/cli/backup_import.go index 6802230aa0..35af345a0a 100644 --- a/cli/backup_import.go +++ b/cli/backup_import.go @@ -11,20 +11,10 @@ package cli import ( - "bytes" - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" ) -func MakeBackupImportCommand(cfg *config.Config) *cobra.Command { +func MakeBackupImportCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "import ", Short: "Import a JSON data file to the database", @@ -32,66 +22,10 @@ func MakeBackupImportCommand(cfg *config.Config) *cobra.Command { Example: import data to the database: defradb client import user_data.json`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(1)(cmd, args); err != nil { - return NewErrInvalidArgumentLength(err, 1) - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) (err error) { - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.ImportPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - inputPath := args[0] - data := map[string]string{ - "filepath": inputPath, - } - - b, err := json.Marshal(data) - if err != nil { - return err - } - - res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(b)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return err - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - r := indexCreateResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to import data", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully imported data from file", - logging.NewKV("File", inputPath)) - } - } - return nil + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + return store.BasicImport(cmd.Context(), args[0]) }, } return cmd diff --git a/cli/backup_import_test.go b/cli/backup_import_test.go deleted file mode 100644 index 
101792dd0c..0000000000 --- a/cli/backup_import_test.go +++ /dev/null @@ -1,129 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - "os" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" -) - -func TestBackupImportCmd_WithNoArgument_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - - dbImportCmd := MakeBackupImportCommand(cfg) - err := dbImportCmd.ValidateArgs([]string{}) - require.ErrorIs(t, err, ErrInvalidArgumentLength) -} - -func TestBackupImportCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - - filepath := t.TempDir() + "/test.json" - - dbImportCmd := MakeBackupImportCommand(cfg) - err := dbImportCmd.RunE(dbImportCmd, []string{filepath}) - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestBackupImportCmd_WithNonExistentFile_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - filepath := t.TempDir() + "/test.json" - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbImportCmd := MakeBackupImportCommand(cfg) - err := dbImportCmd.RunE(dbImportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Failed to import data")) -} - -func TestBackupImportCmd_WithEmptyDatastore_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - filepath := t.TempDir() + "/test.json" - - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), - 0664, - ) - require.NoError(t, err) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbImportCmd := MakeBackupImportCommand(cfg) - err = dbImportCmd.RunE(dbImportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Failed to import data")) -} - -func TestBackupImportCmd_WithExistingCollection_NoError(t *testing.T) { - ctx := context.Background() - - cfg, di, close := startTestNode(t) - defer close() - - _, err := di.db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - filepath := t.TempDir() + "/test.json" - - err = os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-e933420a-988a-56f8-8952-6c245aebd519","age":30,"name":"John"}]}`), - 0664, - ) - require.NoError(t, err) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - dbImportCmd := MakeBackupImportCommand(cfg) - err = dbImportCmd.RunE(dbImportCmd, []string{filepath}) - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, lineHas(logLines, "msg", "Successfully imported data from file")) - - col, err := di.db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - key, err := 
client.NewDocKeyFromString("bae-e933420a-988a-56f8-8952-6c245aebd519") - require.NoError(t, err) - doc, err := col.Get(ctx, key, false) - require.NoError(t, err) - - val, err := doc.Get("name") - require.NoError(t, err) - - require.Equal(t, "John", val.(string)) -} diff --git a/cli/blocks_get.go b/cli/blocks_get.go deleted file mode 100644 index c3519f99e7..0000000000 --- a/cli/blocks_get.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "io" - "net/http" - "os" - - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" -) - -func MakeBlocksGetCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "get [CID]", - Short: "Get a block by its CID from the blockstore", - RunE: func(cmd *cobra.Command, args []string) (err error) { - if len(args) != 1 { - return NewErrMissingArg("CID") - } - cid := args[0] - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.BlocksPath, cid) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToReadResponseBody(err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return NewErrFailedToStatStdOut(err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - graphqlErr, err := hasGraphQLErrors(response) - if err != nil { - return NewErrFailedToHandleGQLErrors(err) - } - indentedResult, err := indentJSON(response) - if err != nil { - return NewErrFailedToPrettyPrintResponse(err) - } - if graphqlErr { - log.FeedbackError(cmd.Context(), indentedResult) - } else { - log.FeedbackInfo(cmd.Context(), indentedResult) - } - } - return nil - }, - } - return cmd -} diff --git a/cli/cli.go b/cli/cli.go index 707adbab7c..0f93b69633 100644 --- a/cli/cli.go +++ b/cli/cli.go @@ -14,176 +14,105 @@ Package cli provides the command-line interface. package cli import ( - "bufio" - "bytes" - "context" - "encoding/json" - "os" - "strings" - "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" ) var log = logging.MustNewLogger("cli") -const badgerDatastoreName = "badger" - -// Errors with how the command is invoked by the user -var usageErrors = []string{ - // cobra errors - subject to change with new versions of cobra - "flag needs an argument", - "invalid syntax", - "unknown flag", - "unknown shorthand flag", - "unknown command", - // custom defradb errors - errMissingArg, - errMissingArgs, - errTooManyArgs, -} - -type DefraCommand struct { - RootCmd *cobra.Command - Cfg *config.Config -} - // NewDefraCommand returns the root command instantiated with its tree of subcommands. 
-func NewDefraCommand(cfg *config.Config) DefraCommand { - rootCmd := MakeRootCommand(cfg) - rpcCmd := MakeRPCCommand(cfg) - blocksCmd := MakeBlocksCommand() - schemaCmd := MakeSchemaCommand() - schemaMigrationCmd := MakeSchemaMigrationCommand() - indexCmd := MakeIndexCommand() - clientCmd := MakeClientCommand() - backupCmd := MakeBackupCommand() - rpcReplicatorCmd := MakeReplicatorCommand() - p2pCollectionCmd := MakeP2PCollectionCommand() - p2pCollectionCmd.AddCommand( - MakeP2PCollectionAddCommand(cfg), - MakeP2PCollectionRemoveCommand(cfg), - MakeP2PCollectionGetallCommand(cfg), +func NewDefraCommand(cfg *config.Config) *cobra.Command { + p2p_collection := MakeP2PCollectionCommand() + p2p_collection.AddCommand( + MakeP2PCollectionAddCommand(), + MakeP2PCollectionRemoveCommand(), + MakeP2PCollectionGetAllCommand(), + ) + + p2p_replicator := MakeP2PReplicatorCommand() + p2p_replicator.AddCommand( + MakeP2PReplicatorGetAllCommand(), + MakeP2PReplicatorSetCommand(), + MakeP2PReplicatorDeleteCommand(), ) - rpcReplicatorCmd.AddCommand( - MakeReplicatorGetallCommand(cfg), - MakeReplicatorSetCommand(cfg), - MakeReplicatorDeleteCommand(cfg), + + p2p := MakeP2PCommand() + p2p.AddCommand( + p2p_replicator, + p2p_collection, + MakeP2PInfoCommand(), ) - rpcCmd.AddCommand( - rpcReplicatorCmd, - p2pCollectionCmd, + + schema_migrate := MakeSchemaMigrationCommand() + schema_migrate.AddCommand( + MakeSchemaMigrationSetCommand(), + MakeSchemaMigrationGetCommand(), + MakeSchemaMigrationReloadCommand(), + MakeSchemaMigrationUpCommand(), + MakeSchemaMigrationDownCommand(), ) - blocksCmd.AddCommand( - MakeBlocksGetCommand(cfg), + + schema := MakeSchemaCommand() + schema.AddCommand( + MakeSchemaAddCommand(), + MakeSchemaPatchCommand(), + MakeSchemaSetDefaultCommand(), + MakeSchemaDescribeCommand(), + schema_migrate, ) - schemaMigrationCmd.AddCommand( - MakeSchemaMigrationSetCommand(cfg), - MakeSchemaMigrationGetCommand(cfg), + + index := MakeIndexCommand() + index.AddCommand( + MakeIndexCreateCommand(), + MakeIndexDropCommand(), + MakeIndexListCommand(), ) - schemaCmd.AddCommand( - MakeSchemaAddCommand(cfg), - MakeSchemaListCommand(cfg), - MakeSchemaPatchCommand(cfg), - schemaMigrationCmd, + + backup := MakeBackupCommand() + backup.AddCommand( + MakeBackupExportCommand(), + MakeBackupImportCommand(), ) - indexCmd.AddCommand( - MakeIndexCreateCommand(cfg), - MakeIndexDropCommand(cfg), - MakeIndexListCommand(cfg), + + tx := MakeTxCommand() + tx.AddCommand( + MakeTxCreateCommand(cfg), + MakeTxCommitCommand(cfg), + MakeTxDiscardCommand(cfg), ) - backupCmd.AddCommand( - MakeBackupExportCommand(cfg), - MakeBackupImportCommand(cfg), + + collection := MakeCollectionCommand(cfg) + collection.AddCommand( + MakeCollectionGetCommand(), + MakeCollectionKeysCommand(), + MakeCollectionDeleteCommand(), + MakeCollectionUpdateCommand(), + MakeCollectionCreateCommand(), + MakeCollectionDescribeCommand(), ) - clientCmd.AddCommand( - MakeDumpCommand(cfg), - MakePingCommand(cfg), - MakeRequestCommand(cfg), - MakePeerIDCommand(cfg), - schemaCmd, - indexCmd, - rpcCmd, - blocksCmd, - backupCmd, + + client := MakeClientCommand(cfg) + client.AddCommand( + MakeDumpCommand(), + MakeRequestCommand(), + schema, + index, + p2p, + backup, + tx, + collection, ) - rootCmd.AddCommand( - clientCmd, + + root := MakeRootCommand(cfg) + root.AddCommand( + client, MakeStartCommand(cfg), MakeServerDumpCmd(cfg), MakeVersionCommand(), MakeInitCommand(cfg), ) - return DefraCommand{rootCmd, cfg} -} - -func (defraCmd *DefraCommand) Execute(ctx 
context.Context) error { - // Silence cobra's default output to control usage and error display. - defraCmd.RootCmd.SilenceUsage = true - defraCmd.RootCmd.SilenceErrors = true - defraCmd.RootCmd.SetOut(os.Stdout) - cmd, err := defraCmd.RootCmd.ExecuteContextC(ctx) - if err != nil { - // Intentional cancellation. - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - return nil - } - // User error. - for _, cobraError := range usageErrors { - if strings.HasPrefix(err.Error(), cobraError) { - log.FeedbackErrorE(ctx, "Usage error", err) - if usageErr := cmd.Usage(); usageErr != nil { - log.FeedbackFatalE(ctx, "error displaying usage help", usageErr) - } - return err - } - } - // Internal error. - log.FeedbackErrorE(ctx, "Execution error", err) - return err - } - return nil -} - -func isFileInfoPipe(fi os.FileInfo) bool { - return fi.Mode()&os.ModeNamedPipe != 0 -} - -func readStdin() (string, error) { - var s strings.Builder - scanner := bufio.NewScanner(os.Stdin) - for scanner.Scan() { - s.Write(scanner.Bytes()) - } - if err := scanner.Err(); err != nil { - return "", errors.Wrap("reading standard input", err) - } - return s.String(), nil -} - -func indentJSON(b []byte) (string, error) { - var indentedJSON bytes.Buffer - err := json.Indent(&indentedJSON, b, "", " ") - return indentedJSON.String(), err -} - -type graphqlErrors struct { - Errors any `json:"errors"` -} - -func hasGraphQLErrors(buf []byte) (bool, error) { - errs := graphqlErrors{} - err := json.Unmarshal(buf, &errs) - if err != nil { - return false, errors.Wrap("couldn't parse GraphQL response %w", err) - } - if errs.Errors != nil { - return true, nil - } else { - return false, nil - } + return root } diff --git a/cli/cli_test.go b/cli/cli_test.go deleted file mode 100644 index 877dd7b69f..0000000000 --- a/cli/cli_test.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "testing" - - "github.com/spf13/cobra" - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/config" -) - -// Verify that the top-level commands are registered, and if particular ones have subcommands. 
-func TestNewDefraCommand(t *testing.T) { - expectedCommandNames := []string{ - "client", - "init", - "server-dump", - "start", - "version", - } - actualCommandNames := []string{} - r := NewDefraCommand(config.DefaultConfig()) - for _, c := range r.RootCmd.Commands() { - actualCommandNames = append(actualCommandNames, c.Name()) - } - for _, expectedCommandName := range expectedCommandNames { - assert.Contains(t, actualCommandNames, expectedCommandName) - } - for _, c := range r.RootCmd.Commands() { - if c.Name() == "client" { - assert.NotEmpty(t, c.Commands()) - } - } -} - -func TestAllHaveUsage(t *testing.T) { - cfg := config.DefaultConfig() - defra := NewDefraCommand(cfg) - walkCommandTree(t, defra.RootCmd, func(c *cobra.Command) { - assert.NotEmpty(t, c.Use) - }) -} - -func walkCommandTree(t *testing.T, cmd *cobra.Command, f func(*cobra.Command)) { - f(cmd) - for _, c := range cmd.Commands() { - walkCommandTree(t, c, f) - } -} diff --git a/cli/client.go b/cli/client.go index 2456df8d43..8866294f69 100644 --- a/cli/client.go +++ b/cli/client.go @@ -12,15 +12,27 @@ package cli import ( "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/config" ) -func MakeClientCommand() *cobra.Command { +func MakeClientCommand(cfg *config.Config) *cobra.Command { + var txID uint64 var cmd = &cobra.Command{ Use: "client", Short: "Interact with a DefraDB node", Long: `Interact with a DefraDB node. Execute queries, add schema types, obtain node info, etc.`, + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + if err := loadConfig(cfg); err != nil { + return err + } + if err := setTransactionContext(cmd, cfg, txID); err != nil { + return err + } + return setStoreContext(cmd, cfg) + }, } - + cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID") return cmd } diff --git a/cli/collection.go b/cli/collection.go new file mode 100644 index 0000000000..8af1839b47 --- /dev/null +++ b/cli/collection.go @@ -0,0 +1,111 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
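The client command above now carries a --tx flag and a PersistentPreRunE hook that stashes the store and transaction on the command's context; the collection command in this file reads them back out. A rough sketch of that pattern, with hypothetical key and helper names standing in for DefraDB's unexported ones:

```go
// Sketch of cobra context threading (hypothetical names): a persistent
// pre-run hook stores state on the command context; RunE handlers fetch it.
package cli

import (
	"context"

	"github.com/spf13/cobra"
)

type ctxKey string

const storeCtxKey ctxKey = "store" // stand-in for the package's real key

// attachStore makes store visible to this command and every subcommand RunE.
func attachStore(cmd *cobra.Command, store any) {
	cmd.SetContext(context.WithValue(cmd.Context(), storeCtxKey, store))
}

// storeFrom mirrors helpers like mustGetStoreContext/tryGetCollectionContext.
func storeFrom(cmd *cobra.Command) (any, bool) {
	v := cmd.Context().Value(storeCtxKey)
	return v, v != nil
}
```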
+ +package cli + +import ( + "context" + + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeCollectionCommand(cfg *config.Config) *cobra.Command { + var txID uint64 + var name string + var schemaRoot string + var versionID string + var cmd = &cobra.Command{ + Use: "collection [--name <name> --schema <schemaRoot> --version <versionID>]", + Short: "Interact with a collection.", + Long: `Create, read, update, and delete documents within a collection.`, + PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { + // cobra does not chain pre run calls so we have to run them again here + if err := loadConfig(cfg); err != nil { + return err + } + if err := setTransactionContext(cmd, cfg, txID); err != nil { + return err + } + if err := setStoreContext(cmd, cfg); err != nil { + return err + } + store := mustGetStoreContext(cmd) + + var col client.Collection + var cols []client.Collection + switch { + case versionID != "": + cols, err = store.GetCollectionsByVersionID(cmd.Context(), versionID) + + case schemaRoot != "": + cols, err = store.GetCollectionsBySchemaRoot(cmd.Context(), schemaRoot) + + case name != "": + col, err = store.GetCollectionByName(cmd.Context(), name) + cols = []client.Collection{col} + + default: + return nil + } + + if err != nil { + return err + } + + if schemaRoot != "" && versionID != "" && len(cols) > 0 { + if cols[0].SchemaRoot() != schemaRoot { + // If a versionID has been provided that does not pair up with the given schema root + // we should error and let the user know they have provided impossible params. + // We only need to check the first item - they will all be the same. + return NewErrSchemaVersionNotOfSchema(schemaRoot, versionID) + } + } + + if name != "" { + // Multiple params may have been specified, and in some cases both are needed. + // For example if a schema version and a collection name have been provided, + // we need to ensure that a collection at the requested version is returned. + // Likewise we need to ensure that if a collection name and schema id are provided, + // but there are none matching both, that nothing is returned. + fetchedCols := cols + cols = nil + for _, c := range fetchedCols { + if c.Name() == name { + cols = append(cols, c) + break + } + } + } + + if len(cols) != 1 { + // If more than one collection matches the given criteria we cannot set the context collection + return nil + } + col = cols[0] + + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + col = col.WithTxn(tx) + } + + ctx := context.WithValue(cmd.Context(), colContextKey, col) + cmd.SetContext(ctx) + return nil + }, + } + cmd.PersistentFlags().Uint64Var(&txID, "tx", 0, "Transaction ID") + cmd.PersistentFlags().StringVar(&name, "name", "", "Collection name") + cmd.PersistentFlags().StringVar(&schemaRoot, "schema", "", "Collection schema root") + cmd.PersistentFlags().StringVar(&versionID, "version", "", "Collection version ID") + return cmd +} diff --git a/cli/collection_create.go b/cli/collection_create.go new file mode 100644 index 0000000000..4dca9be33a --- /dev/null +++ b/cli/collection_create.go @@ -0,0 +1,102 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt.
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + "io" + "os" + + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionCreateCommand() *cobra.Command { + var file string + var cmd = &cobra.Command{ + Use: "create <document>", + Short: "Create a new document.", + Long: `Create a new document. + +Example: create from string + defradb client collection create --name User '{ "name": "Bob" }' + +Example: create multiple from string + defradb client collection create --name User '[{ "name": "Alice" }, { "name": "Bob" }]' + +Example: create from file + defradb client collection create --name User -f document.json + +Example: create from stdin + cat document.json | defradb client collection create --name User - + `, + Args: cobra.RangeArgs(0, 1), + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + var docData []byte + switch { + case file != "": + data, err := os.ReadFile(file) + if err != nil { + return err + } + docData = data + case len(args) == 1 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) + if err != nil { + return err + } + docData = data + case len(args) == 1: + docData = []byte(args[0]) + default: + return ErrNoDocOrFile + } + + var docMap any + if err := json.Unmarshal(docData, &docMap); err != nil { + return err + } + + switch t := docMap.(type) { + case map[string]any: + doc, err := client.NewDocFromMap(t) + if err != nil { + return err + } + return col.Create(cmd.Context(), doc) + case []any: + docs := make([]*client.Document, len(t)) + for i, v := range t { + docMap, ok := v.(map[string]any) + if !ok { + return ErrInvalidDocument + } + doc, err := client.NewDocFromMap(docMap) + if err != nil { + return err + } + docs[i] = doc + } + return col.CreateMany(cmd.Context(), docs) + default: + return ErrInvalidDocument + } + }, + } + cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") + return cmd +} diff --git a/cli/collection_delete.go b/cli/collection_delete.go new file mode 100644 index 0000000000..85539d5eb3 --- /dev/null +++ b/cli/collection_delete.go @@ -0,0 +1,78 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionDeleteCommand() *cobra.Command { + var keys []string + var filter string + var cmd = &cobra.Command{ + Use: "delete [--filter <filter> --key <key>]", + Short: "Delete documents by key or filter.", + Long: `Delete documents by key or filter and list the number of documents deleted.
+ +Example: delete by key(s) + defradb client collection delete --name User --key bae-123,bae-456 + +Example: delete by filter + defradb client collection delete --name User --filter '{ "_gte": { "points": 100 } }' + `, + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + switch { + case len(keys) == 1: + docKey, err := client.NewDocKeyFromString(keys[0]) + if err != nil { + return err + } + res, err := col.DeleteWithKey(cmd.Context(), docKey) + if err != nil { + return err + } + return writeJSON(cmd, res) + case len(keys) > 1: + docKeys := make([]client.DocKey, len(keys)) + for i, v := range keys { + docKey, err := client.NewDocKeyFromString(v) + if err != nil { + return err + } + docKeys[i] = docKey + } + res, err := col.DeleteWithKeys(cmd.Context(), docKeys) + if err != nil { + return err + } + return writeJSON(cmd, res) + case filter != "": + res, err := col.DeleteWithFilter(cmd.Context(), filter) + if err != nil { + return err + } + return writeJSON(cmd, res) + default: + return ErrNoDocKeyOrFilter + } + }, + } + cmd.Flags().StringSliceVar(&keys, "key", nil, "Document key") + cmd.Flags().StringVar(&filter, "filter", "", "Document filter") + return cmd +} diff --git a/cli/collection_describe.go b/cli/collection_describe.go new file mode 100644 index 0000000000..a21c4d0c10 --- /dev/null +++ b/cli/collection_describe.go @@ -0,0 +1,57 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionDescribeCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "describe", + Short: "View collection description.", + Long: `Introspect collection types. + +Example: view all collections + defradb client collection describe + +Example: view collection by name + defradb client collection describe --name User + +Example: view collection by schema id + defradb client collection describe --schema bae123 + +Example: view collection by version id + defradb client collection describe --version bae123 + `, + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + col, ok := tryGetCollectionContext(cmd) + if ok { + return writeJSON(cmd, col.Definition()) + } + // if no collection specified list all collections + cols, err := store.GetAllCollections(cmd.Context()) + if err != nil { + return err + } + colDesc := make([]client.CollectionDefinition, len(cols)) + for i, col := range cols { + colDesc[i] = col.Definition() + } + return writeJSON(cmd, colDesc) + }, + } + return cmd +} diff --git a/cli/collection_get.go b/cli/collection_get.go new file mode 100644 index 0000000000..d908bbdb7a --- /dev/null +++ b/cli/collection_get.go @@ -0,0 +1,53 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
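Both delete above and update further down validate every --key value by converting it to a client.DocKey before issuing the batch call, so one malformed key fails the whole command before anything is touched. Factored out, the shared step looks roughly like this (parseDocKeys is a hypothetical helper, not part of the diff):

```go
// Hypothetical helper mirroring the key-validation loops in the delete and
// update commands: convert each string to a client.DocKey, failing fast.
package cli

import "github.com/sourcenetwork/defradb/client"

func parseDocKeys(keys []string) ([]client.DocKey, error) {
	docKeys := make([]client.DocKey, len(keys))
	for i, v := range keys {
		docKey, err := client.NewDocKeyFromString(v)
		if err != nil {
			return nil, err // no partial batches: invalid input aborts early
		}
		docKeys[i] = docKey
	}
	return docKeys, nil
}
```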
+ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionGetCommand() *cobra.Command { + var showDeleted bool + var cmd = &cobra.Command{ + Use: "get [--show-deleted] <docKey>", + Short: "View document fields.", + Long: `View document fields. + +Example: + defradb client collection get --name User bae-123 + `, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + docKey, err := client.NewDocKeyFromString(args[0]) + if err != nil { + return err + } + doc, err := col.Get(cmd.Context(), docKey, showDeleted) + if err != nil { + return err + } + docMap, err := doc.ToMap() + if err != nil { + return err + } + return writeJSON(cmd, docMap) + }, + } + cmd.Flags().BoolVar(&showDeleted, "show-deleted", false, "Show deleted documents") + return cmd +} diff --git a/cli/collection_keys.go b/cli/collection_keys.go new file mode 100644 index 0000000000..a453c16a86 --- /dev/null +++ b/cli/collection_keys.go @@ -0,0 +1,53 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/http" +) + +func MakeCollectionKeysCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "keys", + Short: "List all document keys.", + Long: `List all document keys. + +Example: + defradb client collection keys --name User + `, + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + docCh, err := col.GetAllDocKeys(cmd.Context()) + if err != nil { + return err + } + for docKey := range docCh { + results := &http.DocKeyResult{ + Key: docKey.Key.String(), + } + if docKey.Err != nil { + results.Error = docKey.Err.Error() + } + if err := writeJSON(cmd, results); err != nil { + return err + } + } + return nil + }, + } + return cmd +} diff --git a/cli/collection_update.go b/cli/collection_update.go new file mode 100644 index 0000000000..317a2e8119 --- /dev/null +++ b/cli/collection_update.go @@ -0,0 +1,99 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeCollectionUpdateCommand() *cobra.Command { + var keys []string + var filter string + var updater string + var cmd = &cobra.Command{ + Use: "update [--filter <filter> --key <key> --updater <updater>] <document>", + Short: "Update documents by key or filter.", + Long: `Update documents by key or filter.
+ +Example: update from string + defradb client collection update --name User --key bae-123 '{ "name": "Bob" }' + +Example: update by filter + defradb client collection update --name User \ + --filter '{ "_gte": { "points": 100 } }' --updater '{ "verified": true }' + +Example: update by keys + defradb client collection update --name User \ + --key bae-123,bae-456 --updater '{ "verified": true }' + `, + Args: cobra.RangeArgs(0, 1), + RunE: func(cmd *cobra.Command, args []string) error { + col, ok := tryGetCollectionContext(cmd) + if !ok { + return cmd.Usage() + } + + switch { + case len(keys) == 1 && updater != "": + docKey, err := client.NewDocKeyFromString(keys[0]) + if err != nil { + return err + } + res, err := col.UpdateWithKey(cmd.Context(), docKey, updater) + if err != nil { + return err + } + return writeJSON(cmd, res) + case len(keys) > 1 && updater != "": + docKeys := make([]client.DocKey, len(keys)) + for i, v := range keys { + docKey, err := client.NewDocKeyFromString(v) + if err != nil { + return err + } + docKeys[i] = docKey + } + res, err := col.UpdateWithKeys(cmd.Context(), docKeys, updater) + if err != nil { + return err + } + return writeJSON(cmd, res) + case filter != "" && updater != "": + res, err := col.UpdateWithFilter(cmd.Context(), filter, updater) + if err != nil { + return err + } + return writeJSON(cmd, res) + case len(keys) == 1 && len(args) == 1: + docKey, err := client.NewDocKeyFromString(keys[0]) + if err != nil { + return err + } + doc, err := col.Get(cmd.Context(), docKey, true) + if err != nil { + return err + } + if err := doc.SetWithJSON([]byte(args[0])); err != nil { + return err + } + return col.Update(cmd.Context(), doc) + default: + return ErrNoDocKeyOrFilter + } + }, + } + cmd.Flags().StringSliceVar(&keys, "key", nil, "Document key") + cmd.Flags().StringVar(&filter, "filter", "", "Document filter") + cmd.Flags().StringVar(&updater, "updater", "", "Document updater") + return cmd +} diff --git a/cli/dump.go b/cli/dump.go index f35e9232b1..a3d155605b 100644 --- a/cli/dump.go +++ b/cli/dump.go @@ -11,69 +11,18 @@ package cli import ( - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/client" ) -func MakeDumpCommand(cfg *config.Config) *cobra.Command { +func MakeDumpCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "dump", Short: "Dump the contents of DefraDB node-side", RunE: func(cmd *cobra.Command, _ []string) (err error) { - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if !isFileInfoPipe(stdout) { - log.FeedbackInfo(cmd.Context(), "Requesting the database to dump its state, server-side...") - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.DumpPath) - if err != nil { - return errors.Wrap("failed to join endpoint", err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return errors.Wrap("failed dump request", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - // dumpResponse follows structure of HTTP API's response - type dumpResponse struct { - Data struct { - Response 
string `json:"response"` - } `json:"data"` - } - r := dumpResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("failed parsing of response", err) - } - log.FeedbackInfo(cmd.Context(), r.Data.Response) - } - return nil + db := cmd.Context().Value(dbContextKey).(client.DB) + return db.PrintDump(cmd.Context()) }, } return cmd diff --git a/cli/errors.go b/cli/errors.go index 17e4819a8b..937bdd2c9d 100644 --- a/cli/errors.go +++ b/cli/errors.go @@ -11,133 +11,32 @@ package cli import ( - "strings" - "github.com/sourcenetwork/defradb/errors" ) const ( - errMissingArg string = "missing argument" - errMissingArgs string = "missing arguments" - errTooManyArgs string = "too many arguments" - errEmptyStdin string = "empty stdin" - errEmptyFile string = "empty file" - errFailedToReadFile string = "failed to read file" - errFailedToReadStdin string = "failed to read stdin" - errFailedToCreateRPCClient string = "failed to create RPC client" - errFailedToAddReplicator string = "failed to add replicator, request failed" - errFailedToJoinEndpoint string = "failed to join endpoint" - errFailedToSendRequest string = "failed to send request" - errFailedToReadResponseBody string = "failed to read response body" - errFailedToCloseResponseBody string = "failed to close response body" - errFailedToStatStdOut string = "failed to stat stdout" - errFailedToHandleGQLErrors string = "failed to handle GraphQL errors" - errFailedToPrettyPrintResponse string = "failed to pretty print response" - errFailedToUnmarshalResponse string = "failed to unmarshal response" - errFailedParsePeerID string = "failed to parse PeerID" - errFailedToMarshalData string = "failed to marshal data" - errInvalidArgumentLength string = "invalid argument length" + errInvalidLensConfig string = "invalid lens configuration" + errSchemaVersionNotOfSchema string = "the given schema version is from a different schema" ) -// Errors returnable from this package. -// -// This list is incomplete and undefined errors may also be returned. -// Errors returned from this package may be tested against these errors with errors.Is. 
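The slimmed-down errors.go keeps the sentinel-error convention the comment above describes: callers can still branch on wrapped errors with errors.Is. A small self-contained illustration using the standard library (DefraDB routes this through its own errors package, but matching works the same way):

```go
// Illustration of sentinel-error matching; ErrNoDocKeyOrFilter mirrors one
// of the sentinels this diff keeps in cli/errors.go.
package main

import (
	"errors"
	"fmt"
)

var ErrNoDocKeyOrFilter = errors.New("document key or filter must be defined")

func main() {
	err := fmt.Errorf("delete failed: %w", ErrNoDocKeyOrFilter)
	if errors.Is(err, ErrNoDocKeyOrFilter) { // true through any %w wrapping
		fmt.Println("caller can handle the missing-selector case")
	}
}
```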
var ( - ErrMissingArg = errors.New(errMissingArg) - ErrMissingArgs = errors.New(errMissingArgs) - ErrTooManyArgs = errors.New(errTooManyArgs) - ErrEmptyFile = errors.New(errEmptyFile) - ErrEmptyStdin = errors.New(errEmptyStdin) - ErrFailedToReadFile = errors.New(errFailedToReadFile) - ErrFailedToReadStdin = errors.New(errFailedToReadStdin) - ErrFailedToCreateRPCClient = errors.New(errFailedToCreateRPCClient) - ErrFailedToAddReplicator = errors.New(errFailedToAddReplicator) - ErrFailedToJoinEndpoint = errors.New(errFailedToJoinEndpoint) - ErrFailedToSendRequest = errors.New(errFailedToSendRequest) - ErrFailedToReadResponseBody = errors.New(errFailedToReadResponseBody) - ErrFailedToStatStdOut = errors.New(errFailedToStatStdOut) - ErrFailedToHandleGQLErrors = errors.New(errFailedToHandleGQLErrors) - ErrFailedToPrettyPrintResponse = errors.New(errFailedToPrettyPrintResponse) - ErrFailedToUnmarshalResponse = errors.New(errFailedToUnmarshalResponse) - ErrFailedParsePeerID = errors.New(errFailedParsePeerID) - ErrInvalidExportFormat = errors.New("invalid export format") - ErrInvalidArgumentLength = errors.New(errInvalidArgumentLength) + ErrNoDocOrFile = errors.New("document or file must be defined") + ErrInvalidDocument = errors.New("invalid document") + ErrNoDocKeyOrFilter = errors.New("document key or filter must be defined") + ErrInvalidExportFormat = errors.New("invalid export format") + ErrNoLensConfig = errors.New("lens config cannot be empty") + ErrInvalidLensConfig = errors.New("invalid lens configuration") + ErrSchemaVersionNotOfSchema = errors.New(errSchemaVersionNotOfSchema) ) -func NewErrMissingArg(name string) error { - return errors.New(errMissingArg, errors.NewKV("Name", name)) -} - -func NewErrMissingArgs(names []string) error { - return errors.New(errMissingArgs, errors.NewKV("Required", strings.Join(names, ", "))) -} - -func NewErrTooManyArgs(max, actual int) error { - return errors.New(errTooManyArgs, errors.NewKV("Max", max), errors.NewKV("Actual", actual)) -} - -func NewFailedToReadFile(inner error) error { - return errors.Wrap(errFailedToReadFile, inner) -} - -func NewFailedToReadStdin(inner error) error { - return errors.Wrap(errFailedToReadStdin, inner) -} - -func NewErrFailedToCreateRPCClient(inner error) error { - return errors.Wrap(errFailedToCreateRPCClient, inner) -} - -func NewErrFailedToAddReplicator(inner error) error { - return errors.Wrap(errFailedToAddReplicator, inner) -} - -func NewErrFailedToJoinEndpoint(inner error) error { - return errors.Wrap(errFailedToJoinEndpoint, inner) -} - -func NewErrFailedToSendRequest(inner error) error { - return errors.Wrap(errFailedToSendRequest, inner) -} - -func NewErrFailedToReadResponseBody(inner error) error { - return errors.Wrap(errFailedToReadResponseBody, inner) -} - -func NewErrFailedToCloseResponseBody(closeErr, other error) error { - if other != nil { - return errors.Wrap(errFailedToCloseResponseBody, closeErr, errors.NewKV("Other error", other)) - } - return errors.Wrap(errFailedToCloseResponseBody, closeErr) -} - -func NewErrFailedToStatStdOut(inner error) error { - return errors.Wrap(errFailedToStatStdOut, inner) -} - -func NewErrFailedToHandleGQLErrors(inner error) error { - return errors.Wrap(errFailedToHandleGQLErrors, inner) -} - -func NewErrFailedToPrettyPrintResponse(inner error) error { - return errors.Wrap(errFailedToPrettyPrintResponse, inner) -} - -func NewErrFailedToUnmarshalResponse(inner error) error { - return errors.Wrap(errFailedToUnmarshalResponse, inner) -} - -func NewErrFailedParsePeerID(inner 
error) error { - return errors.Wrap(errFailedParsePeerID, inner) -} - -// NewFailedToMarshalData returns an error indicating that a there was a problem with mashalling. -func NewFailedToMarshalData(inner error) error { - return errors.Wrap(errFailedToMarshalData, inner) +func NewErrInvalidLensConfig(inner error) error { + return errors.Wrap(errInvalidLensConfig, inner) } -// NewErrInvalidArgumentLength returns an error indicating an incorrect number of arguments. -func NewErrInvalidArgumentLength(inner error, expected int) error { - return errors.Wrap(errInvalidArgumentLength, inner, errors.NewKV("Expected", expected)) +func NewErrSchemaVersionNotOfSchema(schemaRoot string, schemaVersionID string) error { + return errors.New( + errSchemaVersionNotOfSchema, + errors.NewKV("SchemaRoot", schemaRoot), + errors.NewKV("SchemaVersionID", schemaVersionID), + ) } diff --git a/cli/index_create.go b/cli/index_create.go index a91a76d2d0..42866267fc 100644 --- a/cli/index_create.go +++ b/cli/index_create.go @@ -11,33 +11,16 @@ package cli import ( - "bytes" - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/datastore" ) -type indexCreateResponse struct { - Data struct { - Index client.IndexDescription `json:"index"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` -} - -func MakeIndexCreateCommand(cfg *config.Config) *cobra.Command { +func MakeIndexCreateCommand() *cobra.Command { var collectionArg string var nameArg string - var fieldsArg string + var fieldsArg []string var cmd = &cobra.Command{ Use: "create -c --collection <collection> --fields <fields> [-n --name <name>]", Short: "Creates a secondary index on a collection's field(s)", @@ -51,75 +34,34 @@ Example: create an index for 'Users' collection on 'name' field: Example: create a named index for 'Users' collection on 'name' field: defradb client index create --collection Users --fields name --name UsersByName`, ValidArgs: []string{"collection", "fields", "name"}, - RunE: func(cmd *cobra.Command, args []string) (err error) { - if collectionArg == "" || fieldsArg == "" { - if collectionArg == "" { - return NewErrMissingArg("collection") - } else { - return NewErrMissingArg("fields") - } - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - data := map[string]string{ - "collection": collectionArg, - "fields": fieldsArg, + var fields []client.IndexedFieldDescription + for _, name := range fieldsArg { + fields = append(fields, client.IndexedFieldDescription{Name: name}) } - if nameArg != "" { - data["name"] = nameArg + desc := client.IndexDescription{ + Name: nameArg, + Fields: fields, } - - jsonData, err := json.Marshal(data) + col, err := store.GetCollectionByName(cmd.Context(), collectionArg) if err != nil { return err } - - res, err := http.Post(endpoint.String(), "application/json", bytes.NewBuffer(jsonData)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) + if tx, ok := 
cmd.Context().Value(txContextKey).(datastore.Txn); ok { + col = col.WithTxn(tx) } - - stdout, err := os.Stdout.Stat() + desc, err = col.CreateIndex(cmd.Context(), desc) if err != nil { return err } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - r := indexCreateResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to create index", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully created index", - logging.NewKV("Index", r.Data.Index)) - } - } - return nil + return writeJSON(cmd, desc) }, } cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") cmd.Flags().StringVarP(&nameArg, "name", "n", "", "Index name") - cmd.Flags().StringVar(&fieldsArg, "fields", "", "Fields to index") + cmd.Flags().StringSliceVar(&fieldsArg, "fields", []string{}, "Fields to index") return cmd } diff --git a/cli/index_create_test.go b/cli/index_create_test.go deleted file mode 100644 index ac75248c10..0000000000 --- a/cli/index_create_test.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "bufio" - "bytes" - "context" - "encoding/json" - "io" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" -) - -const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" - -func getTestConfig(t *testing.T) *config.Config { - cfg := config.DefaultConfig() - dir := t.TempDir() - cfg.Datastore.Store = "memory" - cfg.Datastore.Badger.Path = dir - cfg.Net.P2PDisabled = false - cfg.Net.P2PAddress = randomMultiaddr - cfg.Net.RPCAddress = "0.0.0.0:0" - cfg.Net.TCPAddress = randomMultiaddr - cfg.API.Address = "0.0.0.0:0" - return cfg -} - -func startTestNode(t *testing.T) (*config.Config, *defraInstance, func()) { - cfg := getTestConfig(t) - - ctx := context.Background() - di, err := start(ctx, cfg) - require.NoError(t, err) - return cfg, di, func() { di.close(ctx) } -} - -func parseLines(r io.Reader) ([]map[string]any, error) { - fileScanner := bufio.NewScanner(r) - - fileScanner.Split(bufio.ScanLines) - - logLines := []map[string]any{} - for fileScanner.Scan() { - loggedLine := make(map[string]any) - err := json.Unmarshal(fileScanner.Bytes(), &loggedLine) - if err != nil { - return nil, err - } - logLines = append(logLines, loggedLine) - } - - return logLines, nil -} - -func lineHas(lines []map[string]any, key, value string) bool { - for _, line := range lines { - if line[key] == value { - return true - } - } - return false -} - -func simulateConsoleOutput(t *testing.T) (*bytes.Buffer, func()) { - b := &bytes.Buffer{} - log.ApplyConfig(logging.Config{ - EncoderFormat: logging.NewEncoderFormatOption(logging.JSON), - Pipe: b, - }) - - f, err := os.CreateTemp(t.TempDir(), "tmpFile") - require.NoError(t, err) - originalStdout := os.Stdout - os.Stdout = f - - return b, func() { - os.Stdout = originalStdout - f.Close() - os.Remove(f.Name()) - } -} - -func execAddSchemaCmd(t *testing.T, cfg 
*config.Config, schema string) { - addSchemaCmd := MakeSchemaAddCommand(cfg) - err := addSchemaCmd.RunE(addSchemaCmd, []string{schema}) - require.NoError(t, err) -} - -func execCreateIndexCmd(t *testing.T, cfg *config.Config, collection, fields, name string) { - indexCreateCmd := MakeIndexCreateCommand(cfg) - indexCreateCmd.SetArgs([]string{ - "--collection", collection, - "--fields", fields, - "--name", name, - }) - err := indexCreateCmd.Execute() - require.NoError(t, err) -} - -func hasLogWithKey(logLines []map[string]any, key string) bool { - for _, logLine := range logLines { - if _, ok := logLine[key]; ok { - return true - } - } - return false -} - -func TestIndexCreateCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - indexCreateCmd := MakeIndexCreateCommand(cfg) - - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "Name", - "--name", "users_name_index", - }) - err := indexCreateCmd.Execute() - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestIndexCreateCmd_IfNoCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - indexCreateCmd := MakeIndexCreateCommand(cfg) - - outputBuf := bytes.NewBufferString("") - indexCreateCmd.SetOut(outputBuf) - - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "Name", - "--name", "users_name_index", - }) - err := indexCreateCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasErrors := r["errors"] - assert.True(t, hasErrors, "command should return error") -} - -func TestIndexCreateCmd_IfNoErrors_ReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - - indexCreateCmd := MakeIndexCreateCommand(cfg) - outputBuf := bytes.NewBufferString("") - indexCreateCmd.SetOut(outputBuf) - - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "name", - "--name", "users_name_index", - }) - err := indexCreateCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasData := r["data"] - assert.True(t, hasData, "command should return data") -} - -func TestIndexCreateCmd_WithConsoleOutputIfNoCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - indexCreateCmd := MakeIndexCreateCommand(cfg) - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "Name", - "--name", "users_name_index", - }) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexCreateCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - assert.True(t, hasLogWithKey(logLines, "Errors")) -} - -func TestIndexCreateCmd_WithConsoleOutputIfNoErrors_ReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - - const indexName = "users_name_index" - indexCreateCmd := MakeIndexCreateCommand(cfg) - indexCreateCmd.SetArgs([]string{ - "--collection", "User", - "--fields", "name", - "--name", indexName, - }) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexCreateCmd.Execute() - 
require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.Len(t, logLines, 1) - result, ok := logLines[0]["Index"].(map[string]any) - require.True(t, ok) - assert.Equal(t, indexName, result["Name"]) - - assert.False(t, hasLogWithKey(logLines, "Errors")) -} diff --git a/cli/index_drop.go b/cli/index_drop.go index ef0a37db0c..03639fb277 100644 --- a/cli/index_drop.go +++ b/cli/index_drop.go @@ -11,29 +11,12 @@ package cli import ( - "bytes" - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/datastore" ) -type indexDropResponse struct { - Data struct { - Result string `json:"result"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` -} - -func MakeIndexDropCommand(cfg *config.Config) *cobra.Command { +func MakeIndexDropCommand() *cobra.Command { var collectionArg string var nameArg string var cmd = &cobra.Command{ @@ -44,74 +27,17 @@ func MakeIndexDropCommand(cfg *config.Config) *cobra.Command { Example: drop the index 'UsersByName' for 'Users' collection: defradb client index drop --collection Users --name UsersByName`, ValidArgs: []string{"collection", "name"}, - RunE: func(cmd *cobra.Command, args []string) (err error) { - if collectionArg == "" || nameArg == "" { - if collectionArg == "" { - return NewErrMissingArg("collection") - } else { - return NewErrMissingArg("name") - } - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - data := map[string]string{ - "collection": collectionArg, - "name": nameArg, - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - jsonData, err := json.Marshal(data) + col, err := store.GetCollectionByName(cmd.Context(), collectionArg) if err != nil { return err } - - req, err := http.NewRequest("DELETE", endpoint.String(), bytes.NewBuffer(jsonData)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - req.Header.Add("Content-Type", "application/json") - client := &http.Client{} - res, err := client.Do(req) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return err - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - r := indexDropResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to drop index", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully dropped index", - logging.NewKV("Result", r.Data.Result)) - } + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + col = col.WithTxn(tx) } - return nil + return col.DropIndex(cmd.Context(), nameArg) }, } cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") diff --git a/cli/index_drop_test.go b/cli/index_drop_test.go deleted file mode 100644 index 7fa368a458..0000000000 --- a/cli/index_drop_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright 2023 
Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "bytes" - "encoding/json" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestIndexDropCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - indexDropCmd := MakeIndexDropCommand(cfg) - - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - err := indexDropCmd.Execute() - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestIndexDropCmd_IfNoCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - indexDropCmd := MakeIndexDropCommand(cfg) - - outputBuf := bytes.NewBufferString("") - indexDropCmd.SetOut(outputBuf) - - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - err := indexDropCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasErrors := r["errors"] - assert.True(t, hasErrors, "command should return error") -} - -func TestIndexDropCmd_IfNoErrors_ShouldReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") - - indexDropCmd := MakeIndexDropCommand(cfg) - outputBuf := bytes.NewBufferString("") - indexDropCmd.SetOut(outputBuf) - - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - err := indexDropCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasData := r["data"] - assert.True(t, hasData, "command should return data") -} - -func TestIndexDropCmd_WithConsoleOutputIfNoCollection_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - indexDropCmd := MakeIndexDropCommand(cfg) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - err := indexDropCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - assert.True(t, hasLogWithKey(logLines, "Errors")) -} - -func TestIndexDropCmd_WithConsoleOutputIfNoErrors_ShouldReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") - - indexDropCmd := MakeIndexDropCommand(cfg) - indexDropCmd.SetArgs([]string{"--collection", "User", "--name", "users_name_index"}) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexDropCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.Len(t, logLines, 1) - assert.Equal(t, "success", logLines[0]["Result"]) - - assert.False(t, hasLogWithKey(logLines, "Errors")) -} diff 
--git a/cli/index_list.go b/cli/index_list.go index 131782cfe5..92ada3e007 100644 --- a/cli/index_list.go +++ b/cli/index_list.go @@ -11,31 +11,12 @@ package cli import ( - "encoding/json" - "io" - "net/http" - "net/url" - "os" - "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" + "github.com/sourcenetwork/defradb/datastore" ) -type indexListResponse struct { - Data struct { - Collections map[string][]client.IndexDescription `json:"collections"` - Indexes []client.IndexDescription `json:"indexes"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` -} - -func MakeIndexListCommand(cfg *config.Config) *cobra.Command { +func MakeIndexListCommand() *cobra.Command { var collectionArg string var cmd = &cobra.Command{ Use: "list [-c --collection <collection>]", @@ -48,60 +29,30 @@ Otherwise, all indexes in the database will be shown. Example: show all indexes for 'Users' collection: defradb client index list --collection Users`, ValidArgs: []string{"collection"}, - RunE: func(cmd *cobra.Command, args []string) (err error) { - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.IndexPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - if collectionArg != "" { - values := url.Values{ - "collection": {collectionArg}, + switch { + case collectionArg != "": + col, err := store.GetCollectionByName(cmd.Context(), collectionArg) + if err != nil { + return err } - endpoint.RawQuery = values.Encode() - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + col = col.WithTxn(tx) } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return err - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - r := indexListResponse{} - err = json.Unmarshal(response, &r) + indexes, err := col.GetIndexes(cmd.Context()) if err != nil { - return NewErrFailedToUnmarshalResponse(err) + return err } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to list index", - logging.NewKV("Errors", r.Errors)) - } else if collectionArg != "" { - log.FeedbackInfo(cmd.Context(), "Fetched indexes for collection "+collectionArg, - logging.NewKV("Indexes", r.Data.Indexes)) - } else { - log.FeedbackInfo(cmd.Context(), "Fetched all indexes", - logging.NewKV("Collections", r.Data.Collections)) + return writeJSON(cmd, indexes) + default: + indexes, err := store.GetAllIndexes(cmd.Context()) + if err != nil { + return err } + return writeJSON(cmd, indexes) } - return nil }, } cmd.Flags().StringVarP(&collectionArg, "collection", "c", "", "Collection name") diff --git a/cli/index_list_test.go b/cli/index_list_test.go deleted file mode 100644 index 548d2af040..0000000000 --- a/cli/index_list_test.go +++ /dev/null @@ -1,145 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt.
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "bytes" - "encoding/json" - "io" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestIndexListCmd_IfInvalidAddress_ReturnError(t *testing.T) { - cfg := getTestConfig(t) - cfg.API.Address = "invalid address" - indexCreateCmd := MakeIndexListCommand(cfg) - - err := indexCreateCmd.RunE(indexCreateCmd, nil) - require.ErrorIs(t, err, NewErrFailedToJoinEndpoint(err)) -} - -func TestIndexListCmd_IfNoErrors_ShouldReturnData(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execCreateIndexCmd(t, cfg, "User", "name", "users_name_index") - - indexListCmd := MakeIndexListCommand(cfg) - outputBuf := bytes.NewBufferString("") - indexListCmd.SetOut(outputBuf) - - err := indexListCmd.Execute() - require.NoError(t, err) - - out, err := io.ReadAll(outputBuf) - require.NoError(t, err) - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - require.NoError(t, err) - - _, hasData := r["data"] - assert.True(t, hasData, "command should return data") -} - -func TestIndexListCmd_WithConsoleOutputIfCollectionDoesNotExist_ReturnError(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - indexListCmd := MakeIndexListCommand(cfg) - indexListCmd.SetArgs([]string{"--collection", "User"}) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexListCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.True(t, hasLogWithKey(logLines, "Errors")) -} - -func TestIndexListCmd_WithConsoleOutputIfCollectionIsGiven_ReturnCollectionList(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - const indexName = "users_name_index" - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execCreateIndexCmd(t, cfg, "User", "name", indexName) - - indexListCmd := MakeIndexListCommand(cfg) - indexListCmd.SetArgs([]string{"--collection", "User"}) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexListCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.Len(t, logLines, 1) - resultList, ok := logLines[0]["Indexes"].([]any) - require.True(t, ok) - require.Len(t, resultList, 1) - result, ok := resultList[0].(map[string]any) - require.True(t, ok) - assert.Equal(t, indexName, result["Name"]) - - assert.False(t, hasLogWithKey(logLines, "Errors")) -} - -func TestIndexListCmd_WithConsoleOutputIfNoArgs_ReturnAllIndexes(t *testing.T) { - cfg, _, close := startTestNode(t) - defer close() - - const userIndexName = "users_name_index" - const productIndexName = "product_price_index" - execAddSchemaCmd(t, cfg, `type User { name: String }`) - execAddSchemaCmd(t, cfg, `type Product { price: Int }`) - execCreateIndexCmd(t, cfg, "User", "name", userIndexName) - execCreateIndexCmd(t, cfg, "Product", "price", productIndexName) - - indexListCmd := MakeIndexListCommand(cfg) - - outputBuf, revertOutput := simulateConsoleOutput(t) - defer revertOutput() - - err := indexListCmd.Execute() - require.NoError(t, err) - - logLines, err := parseLines(outputBuf) - require.NoError(t, err) - require.Len(t, logLines, 1) - 
resultCollections, ok := logLines[0]["Collections"].(map[string]any) - require.True(t, ok) - - userCollection, ok := resultCollections["User"].([]any) - require.True(t, ok) - require.Len(t, userCollection, 1) - userIndex, ok := userCollection[0].(map[string]any) - require.True(t, ok) - require.Equal(t, userIndexName, userIndex["Name"]) - - productCollection, ok := resultCollections["Product"].([]any) - require.True(t, ok) - require.Len(t, productCollection, 1) - productIndex, ok := productCollection[0].(map[string]any) - require.True(t, ok) - require.Equal(t, productIndexName, productIndex["Name"]) - - assert.False(t, hasLogWithKey(logLines, "Errors")) -} diff --git a/cli/blocks.go b/cli/p2p.go similarity index 75% rename from cli/blocks.go rename to cli/p2p.go index 9e55c36d22..ee084cc67b 100644 --- a/cli/blocks.go +++ b/cli/p2p.go @@ -14,11 +14,11 @@ import ( "github.com/spf13/cobra" ) -func MakeBlocksCommand() *cobra.Command { +func MakeP2PCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "blocks", - Short: "Interact with the database's blockstore", + Use: "p2p", + Short: "Interact with the DefraDB P2P system", + Long: "Interact with the DefraDB P2P system", } - return cmd } diff --git a/cli/p2p_collection.go b/cli/p2p_collection.go index 6ce6d8e7c7..140ac4cc34 100644 --- a/cli/p2p_collection.go +++ b/cli/p2p_collection.go @@ -16,7 +16,7 @@ import ( func MakeP2PCollectionCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "p2pcollection", + Use: "collection", Short: "Configure the P2P collection system", Long: `Add, delete, or get the list of P2P collections. The selected collections synchronize their events on the pubsub network.`, diff --git a/cli/p2p_collection_add.go b/cli/p2p_collection_add.go index 46a4f171e1..dedae0a358 100644 --- a/cli/p2p_collection_add.go +++ b/cli/p2p_collection_add.go @@ -11,51 +11,38 @@ package cli import ( - "context" + "strings" "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" ) -func MakeP2PCollectionAddCommand(cfg *config.Config) *cobra.Command { +func MakeP2PCollectionAddCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "add [collectionID]", + Use: "add [collectionIDs]", Short: "Add P2P collections", Long: `Add P2P collections to the synchronized pubsub topics. -The collections are synchronized between nodes of a pubsub network.`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { - return errors.New("must specify at least one collectionID") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return ErrFailedToCreateRPCClient - } +The collections are synchronized between nodes of a pubsub network. - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) - } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() +Example: add single collection + defradb client p2p collection add bae123 - err = client.AddP2PCollections(ctx, args...) 
- if err != nil { - return errors.Wrap("failed to add P2P collections, request failed", err) +Example: add multiple collections + defradb client p2p collection add bae123,bae456 + `, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + p2p := mustGetP2PContext(cmd) + + var collectionIDs []string + for _, id := range strings.Split(args[0], ",") { + id = strings.TrimSpace(id) + if id == "" { + continue + } + collectionIDs = append(collectionIDs, id) } - log.FeedbackInfo(ctx, "Successfully added P2P collections", logging.NewKV("Collections", args)) - return nil + + return p2p.AddP2PCollections(cmd.Context(), collectionIDs) }, } return cmd diff --git a/cli/p2p_collection_getall.go b/cli/p2p_collection_getall.go index cb9c9f4025..10d98582c6 100644 --- a/cli/p2p_collection_getall.go +++ b/cli/p2p_collection_getall.go @@ -11,60 +11,24 @@ package cli import ( - "context" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" ) -func MakeP2PCollectionGetallCommand(cfg *config.Config) *cobra.Command { +func MakeP2PCollectionGetAllCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "getall", Short: "Get all P2P collections", Long: `Get all P2P collections in the pubsub topics. This is the list of collections of the node that are synchronized on the pubsub network.`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.NoArgs(cmd, args); err != nil { - return errors.New("must specify no argument") - } - return nil - }, + Args: cobra.NoArgs, RunE: func(cmd *cobra.Command, args []string) error { - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return ErrFailedToCreateRPCClient - } + p2p := mustGetP2PContext(cmd) - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() + cols, err := p2p.GetAllP2PCollections(cmd.Context()) if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) + return err } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() - - collections, err := client.GetAllP2PCollections(ctx) - if err != nil { - return errors.Wrap("failed to add P2P collections, request failed", err) - } - - if len(collections) > 0 { - log.FeedbackInfo(ctx, "Successfully got all P2P collections") - for _, col := range collections { - log.FeedbackInfo(ctx, col.Name, logging.NewKV("CollectionID", col.ID)) - } - } else { - log.FeedbackInfo(ctx, "No P2P collection found") - } - - return nil + return writeJSON(cmd, cols) }, } return cmd diff --git a/cli/p2p_collection_remove.go b/cli/p2p_collection_remove.go index 66dbd5fa16..8aa0b5b7df 100644 --- a/cli/p2p_collection_remove.go +++ b/cli/p2p_collection_remove.go @@ -11,51 +11,38 @@ package cli import ( - "context" + "strings" "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" ) -func MakeP2PCollectionRemoveCommand(cfg *config.Config) *cobra.Command { +func MakeP2PCollectionRemoveCommand() *cobra.Command { var cmd = &cobra.Command{ - Use: "remove [collectionID]", + 
Use: "remove [collectionIDs]", Short: "Remove P2P collections", Long: `Remove P2P collections from the followed pubsub topics. -The removed collections will no longer be synchronized between nodes.`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.MinimumNArgs(1)(cmd, args); err != nil { - return errors.New("must specify at least one collectionID") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return ErrFailedToCreateRPCClient - } +The removed collections will no longer be synchronized between nodes. - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) - } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() +Example: remove single collection + defradb client p2p collection remove bae123 - err = client.RemoveP2PCollections(ctx, args...) - if err != nil { - return errors.Wrap("failed to remove P2P collections, request failed", err) +Example: remove multiple collections + defradb client p2p collection remove bae123,bae456 + `, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + p2p := mustGetP2PContext(cmd) + + var collectionIDs []string + for _, id := range strings.Split(args[0], ",") { + id = strings.TrimSpace(id) + if id == "" { + continue + } + collectionIDs = append(collectionIDs, id) } - log.FeedbackInfo(ctx, "Successfully removed P2P collections", logging.NewKV("Collections", args)) - return nil + + return p2p.RemoveP2PCollections(cmd.Context(), collectionIDs) }, } return cmd diff --git a/cli/p2p_info.go b/cli/p2p_info.go new file mode 100644 index 0000000000..36adfb8fac --- /dev/null +++ b/cli/p2p_info.go @@ -0,0 +1,30 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/http" +) + +func MakeP2PInfoCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "info", + Short: "Get peer info from a DefraDB node", + Long: `Get peer info from a DefraDB node`, + RunE: func(cmd *cobra.Command, args []string) error { + db := cmd.Context().Value(dbContextKey).(*http.Client) + return writeJSON(cmd, db.PeerInfo()) + }, + } + return cmd +} diff --git a/cli/replicator.go b/cli/p2p_replicator.go similarity index 93% rename from cli/replicator.go rename to cli/p2p_replicator.go index c7956c80a6..d12684be51 100644 --- a/cli/replicator.go +++ b/cli/p2p_replicator.go @@ -14,7 +14,7 @@ import ( "github.com/spf13/cobra" ) -func MakeReplicatorCommand() *cobra.Command { +func MakeP2PReplicatorCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "replicator", Short: "Configure the replicator system", diff --git a/cli/p2p_replicator_delete.go b/cli/p2p_replicator_delete.go new file mode 100644 index 0000000000..6cc2ddf785 --- /dev/null +++ b/cli/p2p_replicator_delete.go @@ -0,0 +1,51 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeP2PReplicatorDeleteCommand() *cobra.Command { + var collections []string + var cmd = &cobra.Command{ + Use: "delete [-c, --collection] ", + Short: "Delete replicator(s) and stop synchronization", + Long: `Delete replicator(s) and stop synchronization. +A replicator synchronizes one or all collection(s) from this node to another. + +Example: + defradb client p2p replicator delete -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}' + `, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + p2p := mustGetP2PContext(cmd) + + var info peer.AddrInfo + if err := json.Unmarshal([]byte(args[0]), &info); err != nil { + return err + } + rep := client.Replicator{ + Info: info, + Schemas: collections, + } + return p2p.DeleteReplicator(cmd.Context(), rep) + }, + } + cmd.Flags().StringSliceVarP(&collections, "collection", "c", + []string{}, "Collection(s) to stop replicating") + return cmd +} diff --git a/cli/p2p_replicator_getall.go b/cli/p2p_replicator_getall.go new file mode 100644 index 0000000000..4bdf6e8487 --- /dev/null +++ b/cli/p2p_replicator_getall.go @@ -0,0 +1,38 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeP2PReplicatorGetAllCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "getall", + Short: "Get all replicators", + Long: `Get all the replicators active in the P2P data sync system. +A replicator synchronizes one or all collection(s) from this node to another. 
+ +Example: + defradb client p2p replicator getall + `, + RunE: func(cmd *cobra.Command, args []string) error { + p2p := mustGetP2PContext(cmd) + + reps, err := p2p.GetAllReplicators(cmd.Context()) + if err != nil { + return err + } + return writeJSON(cmd, reps) + }, + } + return cmd +} diff --git a/cli/p2p_replicator_set.go b/cli/p2p_replicator_set.go new file mode 100644 index 0000000000..5d9c712a82 --- /dev/null +++ b/cli/p2p_replicator_set.go @@ -0,0 +1,52 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + + "github.com/libp2p/go-libp2p/core/peer" + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeP2PReplicatorSetCommand() *cobra.Command { + var collections []string + var cmd = &cobra.Command{ + Use: "set [-c, --collection] ", + Short: "Add replicator(s) and start synchronization", + Long: `Add replicator(s) and start synchronization. +A replicator synchronizes one or all collection(s) from this node to another. + +Example: + defradb client p2p replicator set -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}' +`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + p2p := mustGetP2PContext(cmd) + + var info peer.AddrInfo + if err := json.Unmarshal([]byte(args[0]), &info); err != nil { + return err + } + rep := client.Replicator{ + Info: info, + Schemas: collections, + } + return p2p.SetReplicator(cmd.Context(), rep) + }, + } + + cmd.Flags().StringSliceVarP(&collections, "collection", "c", + []string{}, "Collection(s) to replicate") + return cmd +} diff --git a/cli/peerid.go b/cli/peerid.go deleted file mode 100644 index a3d269fb2d..0000000000 --- a/cli/peerid.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
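// The replicator set and delete commands above both decode their single
// argument as the JSON form of libp2p's peer.AddrInfo. A self-contained
// sketch of that decoding; note that "12D3" in the help text is a shortened
// placeholder, and a full peer ID is required for a successful decode.
package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	// Usage: go run . '{"ID": "<full peer ID>", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}'
	var info peer.AddrInfo
	if err := json.Unmarshal([]byte(os.Args[1]), &info); err != nil {
		fmt.Fprintln(os.Stderr, "invalid peer info:", err)
		os.Exit(1)
	}
	fmt.Println("peer:", info.ID, "addrs:", info.Addrs)
}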
- -package cli - -import ( - "encoding/json" - "io" - "net/http" - "os" - - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" -) - -func MakePeerIDCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "peerid", - Short: "Get the PeerID of the node", - Long: `Get the PeerID of the node.`, - RunE: func(cmd *cobra.Command, _ []string) (err error) { - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if !isFileInfoPipe(stdout) { - log.FeedbackInfo(cmd.Context(), "Requesting PeerID...") - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.PeerIDPath) - if err != nil { - return errors.Wrap("failed to join endpoint", err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return errors.Wrap("failed to request PeerID", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - if res.StatusCode == http.StatusNotFound { - r := httpapi.ErrorResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("parsing of response failed", err) - } - if len(r.Errors) > 0 { - if isFileInfoPipe(stdout) { - b, err := json.Marshal(r.Errors[0]) - if err != nil { - return errors.Wrap("mashalling error response failed", err) - } - cmd.Println(string(b)) - } else { - log.FeedbackInfo(cmd.Context(), r.Errors[0].Message) - } - return nil - } - return errors.New("no PeerID available. P2P might be disabled") - } - - r := httpapi.DataResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("parsing of response failed", err) - } - if isFileInfoPipe(stdout) { - b, err := json.Marshal(r.Data) - if err != nil { - return errors.Wrap("mashalling data response failed", err) - } - cmd.Println(string(b)) - } else if data, ok := r.Data.(map[string]any); ok { - log.FeedbackInfo(cmd.Context(), data["peerID"].(string)) - } - - return nil - }, - } - return cmd -} diff --git a/cli/peerid_test.go b/cli/peerid_test.go deleted file mode 100644 index 34874ef80d..0000000000 --- a/cli/peerid_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
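// The deleted peerid command above hand-rolled its output handling: stat
// stdout, detect pipes, unmarshal the HTTP response, then log or print
// accordingly. The replacement commands in this PR all funnel output through
// a single writeJSON helper instead. A minimal sketch of such a helper; the
// real one is defined elsewhere in this PR, and its exact shape
// (indentation, encoder options) is an assumption here.
package main

import (
	"encoding/json"
	"os"

	"github.com/spf13/cobra"
)

func writeJSON(cmd *cobra.Command, out any) error {
	enc := json.NewEncoder(cmd.OutOrStdout())
	enc.SetIndent("", "  ")
	return enc.Encode(out)
}

func main() {
	cmd := &cobra.Command{Use: "demo"}
	cmd.SetOut(os.Stdout)
	_ = writeJSON(cmd, map[string]any{"peerID": "12D3..."})
}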
- -package cli - -import ( - "bytes" - "context" - "encoding/json" - "io" - "net/http" - "testing" - - "github.com/stretchr/testify/assert" - - httpapi "github.com/sourcenetwork/defradb/api/http" -) - -func TestGetPeerIDCmd(t *testing.T) { - cfg := getTestConfig(t) - peerIDCmd := MakePeerIDCommand(cfg) - dir := t.TempDir() - ctx := context.Background() - cfg.Datastore.Store = "memory" - cfg.Datastore.Badger.Path = dir - cfg.Net.P2PDisabled = false - - di, err := start(ctx, cfg) - if err != nil { - t.Fatal(err) - } - defer di.close(ctx) - - b := bytes.NewBufferString("") - peerIDCmd.SetOut(b) - - err = peerIDCmd.RunE(peerIDCmd, nil) - if err != nil { - t.Fatal(err) - } - - out, err := io.ReadAll(b) - if err != nil { - t.Fatal(err) - } - - r := make(map[string]any) - err = json.Unmarshal(out, &r) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, di.node.PeerID().String(), r["peerID"]) -} - -func TestGetPeerIDCmdWithNoP2P(t *testing.T) { - cfg := getTestConfig(t) - peerIDCmd := MakePeerIDCommand(cfg) - dir := t.TempDir() - ctx := context.Background() - cfg.Datastore.Store = "memory" - cfg.Datastore.Badger.Path = dir - cfg.Net.P2PDisabled = true - - di, err := start(ctx, cfg) - if err != nil { - t.Fatal(err) - } - defer di.close(ctx) - - b := bytes.NewBufferString("") - peerIDCmd.SetOut(b) - - err = peerIDCmd.RunE(peerIDCmd, nil) - if err != nil { - t.Fatal(err) - } - - out, err := io.ReadAll(b) - if err != nil { - t.Fatal(err) - } - - r := httpapi.ErrorItem{} - err = json.Unmarshal(out, &r) - if err != nil { - t.Fatal(err) - } - - assert.Equal(t, http.StatusNotFound, r.Extensions.Status) - assert.Equal(t, "Not Found", r.Extensions.HTTPError) - assert.Equal(t, "no PeerID available. P2P might be disabled", r.Message) -} diff --git a/cli/ping.go b/cli/ping.go deleted file mode 100644 index 210847dfcc..0000000000 --- a/cli/ping.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
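// The deleted tests above use a capture pattern worth keeping for the new
// commands: swap the cobra command's writer for an in-memory buffer, run the
// command, then unmarshal and assert on the captured output. A generic,
// runnable version of that pattern:
package main

import (
	"bytes"
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(cmd *cobra.Command, args []string) error {
			cmd.Println(`{"peerID": "12D3..."}`)
			return nil
		},
	}
	buf := bytes.NewBufferString("")
	cmd.SetOut(buf)
	if err := cmd.Execute(); err != nil {
		panic(err)
	}
	fmt.Print(buf.String()) // a real test would json.Unmarshal and assert here
}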
- -package cli - -import ( - "encoding/json" - "io" - "net/http" - "os" - - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" -) - -func MakePingCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "ping", - Short: "Ping to test connection with a node", - RunE: func(cmd *cobra.Command, _ []string) (err error) { - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if !isFileInfoPipe(stdout) { - log.FeedbackInfo(cmd.Context(), "Sending ping...") - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.PingPath) - if err != nil { - return errors.Wrap("failed to join endpoint", err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return errors.Wrap("failed to send ping", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - type pingResponse struct { - Data struct { - Response string `json:"response"` - } `json:"data"` - } - r := pingResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("parsing of response failed", err) - } - log.FeedbackInfo(cmd.Context(), r.Data.Response) - } - return nil - }, - } - return cmd -} diff --git a/cli/replicator_delete.go b/cli/replicator_delete.go deleted file mode 100644 index eb7e580f12..0000000000 --- a/cli/replicator_delete.go +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - - "github.com/libp2p/go-libp2p/core/peer" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" -) - -func MakeReplicatorDeleteCommand(cfg *config.Config) *cobra.Command { - var ( - fullRep bool - col []string - ) - var cmd = &cobra.Command{ - Use: "delete [-f, --full | -c, --collection] ", - Short: "Delete a replicator. It will stop synchronizing", - Long: `Delete a replicator. 
It will stop synchronizing.`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(1)(cmd, args); err != nil { - return errors.New("must specify one argument: PeerID") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - pidString := args[0] - - if len(col) == 0 && !fullRep { - return errors.New("must run with either --full or --collection") - } - - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return ErrFailedToCreateRPCClient - } - - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) - } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() - - pid, err := peer.Decode(pidString) - if err != nil { - return NewErrFailedParsePeerID(err) - } - - err = client.DeleteReplicator(ctx, pid) - if err != nil { - return errors.Wrap("failed to delete replicator, request failed", err) - } - log.FeedbackInfo(ctx, "Successfully deleted replicator", logging.NewKV("PeerID", pid.String())) - return nil - }, - } - cmd.Flags().BoolVarP(&fullRep, "full", "f", false, "Set the replicator to act on all collections") - cmd.Flags().StringArrayVarP(&col, "collection", "c", - []string{}, "Define the collection for the replicator") - cmd.MarkFlagsMutuallyExclusive("full", "collection") - return cmd -} diff --git a/cli/replicator_getall.go b/cli/replicator_getall.go deleted file mode 100644 index 63cd6533ba..0000000000 --- a/cli/replicator_getall.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" -) - -func MakeReplicatorGetallCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "getall", - Short: "Get all replicators", - Long: `Get all the replicators active in the P2P data sync system. 
-These are the replicators that are currently replicating data from one node to another.`, - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) != 0 { - if err := cmd.Usage(); err != nil { - return err - } - return errors.New("must specify no argument") - } - - log.FeedbackInfo( - cmd.Context(), - "Getting all replicators", - logging.NewKV("RPCAddress", cfg.Net.RPCAddress), - ) - - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return errors.Wrap("failed to create RPC client", err) - } - - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) - } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() - - reps, err := client.GetAllReplicators(ctx) - if err != nil { - return errors.Wrap("failed to get replicators, request failed", err) - } - if len(reps) > 0 { - log.FeedbackInfo(ctx, "Successfully got all replicators") - for _, rep := range reps { - log.FeedbackInfo( - ctx, - rep.Info.ID.String(), - logging.NewKV("Schemas", rep.Schemas), - logging.NewKV("Addrs", rep.Info.Addrs), - ) - } - } else { - log.FeedbackInfo(ctx, "No replicator found") - } - - return nil - }, - } - return cmd -} diff --git a/cli/replicator_set.go b/cli/replicator_set.go deleted file mode 100644 index acb70d0cfd..0000000000 --- a/cli/replicator_set.go +++ /dev/null @@ -1,86 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - - ma "github.com/multiformats/go-multiaddr" - "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials/insecure" - - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" - netclient "github.com/sourcenetwork/defradb/net/api/client" -) - -func MakeReplicatorSetCommand(cfg *config.Config) *cobra.Command { - var ( - fullRep bool - col []string - ) - var cmd = &cobra.Command{ - Use: "set [-f, --full | -c, --collection] ", - Short: "Set a P2P replicator", - Long: `Add a new target replicator. -A replicator replicates one or all collection(s) from this node to another. 
-`, - Args: func(cmd *cobra.Command, args []string) error { - if err := cobra.ExactArgs(1)(cmd, args); err != nil { - return errors.New("must specify one argument: peer") - } - return nil - }, - RunE: func(cmd *cobra.Command, args []string) error { - peerAddr, err := ma.NewMultiaddr(args[0]) - if err != nil { - return NewErrFailedParsePeerID(err) - } - if len(col) == 0 && !fullRep { - return errors.New("must run with either --full or --collection") - } - - cred := insecure.NewCredentials() - client, err := netclient.NewClient(cfg.Net.RPCAddress, grpc.WithTransportCredentials(cred)) - if err != nil { - return ErrFailedToCreateRPCClient - } - - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return errors.Wrap("failed to parse RPC timeout duration", err) - } - - ctx, cancel := context.WithTimeout(cmd.Context(), rpcTimeoutDuration) - defer cancel() - - pid, err := client.SetReplicator(ctx, peerAddr, col...) - if err != nil { - return errors.Wrap("failed to add replicator, request failed", err) - } - log.FeedbackInfo( - ctx, - "Successfully added replicator", - logging.NewKV("PeerID", pid), - logging.NewKV("Collections", col), - ) - return nil - }, - } - - cmd.Flags().BoolVarP(&fullRep, "full", "f", false, "Set the replicator to act on all collections") - cmd.Flags().StringArrayVarP(&col, "collection", "c", - []string{}, "Define the collection for the replicator") - cmd.MarkFlagsMutuallyExclusive("full", "collection") - return cmd -} diff --git a/cli/request.go b/cli/request.go index 1b8f86ced8..56e33d7c4a 100644 --- a/cli/request.go +++ b/cli/request.go @@ -12,18 +12,19 @@ package cli import ( "io" - "net/http" - "net/url" "os" "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/errors" ) -func MakeRequestCommand(cfg *config.Config) *cobra.Command { +const ( + REQ_RESULTS_HEADER = "------ Request Results ------\n" + SUB_RESULTS_HEADER = "------ Subscription Results ------\n" +) + +func MakeRequestCommand() *cobra.Command { var filePath string var cmd = &cobra.Command{ Use: "query [query request]", @@ -43,101 +44,43 @@ A GraphQL client such as GraphiQL (https://github.com/graphql/graphiql) can be u with the database more conveniently. To learn more about the DefraDB GraphQL Query Language, refer to https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - var request string + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - fi, err := os.Stdin.Stat() - if err != nil { - return err - } - - if filePath != "" { - bytes, err := os.ReadFile(filePath) + var request string + switch { + case filePath != "": + data, err := os.ReadFile(filePath) if err != nil { - return ErrFailedToReadFile - } - request = string(bytes) - } else if len(args) > 1 { - if err = cmd.Usage(); err != nil { return err } - return errors.New("too many arguments") - } else if isFileInfoPipe(fi) && (len(args) == 0 || args[0] != "-") { - log.FeedbackInfo( - cmd.Context(), - "Run 'defradb client query -' to read from stdin. 
Example: 'cat my.graphql | defradb client query -').", - ) - return nil - } else if len(args) == 0 { - err := cmd.Help() + request = string(data) + case len(args) > 0 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) if err != nil { - return errors.Wrap("failed to print help", err) - } - return nil - } else if args[0] == "-" { - stdin, err := readStdin() - if err != nil { - return errors.Wrap("failed to read stdin", err) - } - if len(stdin) == 0 { - return errors.New("no query request in stdin provided") - } else { - request = stdin + return err } - } else { - request = args[0] + request = string(data) + case len(args) > 0: + request = string(args[0]) } if request == "" { return errors.New("request cannot be empty") } + result := store.ExecRequest(cmd.Context(), request) - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.GraphQLPath) - if err != nil { - return errors.Wrap("joining paths failed", err) + var errors []string + for _, err := range result.GQL.Errors { + errors = append(errors, err.Error()) } - - p := url.Values{} - p.Add("query", request) - endpoint.RawQuery = p.Encode() - - res, err := http.Get(endpoint.String()) - if err != nil { - return errors.Wrap("failed request", err) + if result.Pub == nil { + cmd.Print(REQ_RESULTS_HEADER) + return writeJSON(cmd, map[string]any{"data": result.GQL.Data, "errors": errors}) } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - fi, err = os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - - if isFileInfoPipe(fi) { - cmd.Println(string(response)) - } else { - graphlErr, err := hasGraphQLErrors(response) - if err != nil { - return errors.Wrap("failed to handle GraphQL errors", err) - } - indentedResult, err := indentJSON(response) - if err != nil { - return errors.Wrap("failed to pretty print result", err) - } - if graphlErr { - log.FeedbackError(cmd.Context(), indentedResult) - } else { - log.FeedbackInfo(cmd.Context(), indentedResult) - } + cmd.Print(SUB_RESULTS_HEADER) + for item := range result.Pub.Stream() { + writeJSON(cmd, item) //nolint:errcheck } return nil }, diff --git a/cli/root.go b/cli/root.go index e639cde785..729b638f02 100644 --- a/cli/root.go +++ b/cli/root.go @@ -16,34 +16,19 @@ import ( "github.com/spf13/cobra" "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" ) func MakeRootCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ - Use: "defradb", - Short: "DefraDB Edge Database", + SilenceUsage: true, + Use: "defradb", + Short: "DefraDB Edge Database", Long: `DefraDB is the edge database to power the user-centric future. Start a DefraDB node, interact with a local or remote node, and much more. `, - // Runs on subcommands before their Run function, to handle configuration and top-level flags. - // Loads the rootDir containing the configuration file, otherwise warn about it and load a default configuration. - // This allows some subcommands (`init`, `start`) to override the PreRun to create a rootDir by default. 
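// The rewritten request command above reads its input from one of three
// sources: a --file flag, "-" for stdin, or the first positional argument.
// The same three-way switch recurs in schema add, schema patch, and the
// migration commands in this PR. A sketch of the shared shape; readInput is
// an illustrative name, as the PR repeats the switch inline in each command.
package main

import (
	"errors"
	"fmt"
	"io"
	"os"
	"strings"
)

func readInput(filePath string, args []string, stdin io.Reader) (string, error) {
	switch {
	case filePath != "":
		data, err := os.ReadFile(filePath)
		return string(data), err
	case len(args) > 0 && args[0] == "-":
		data, err := io.ReadAll(stdin)
		return string(data), err
	case len(args) > 0:
		return args[0], nil
	default:
		return "", errors.New("request cannot be empty")
	}
}

func main() {
	req, err := readInput("", []string{"-"}, strings.NewReader("{ users { name } }"))
	fmt.Println(req, err)
}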
- PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { - if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { - return err - } - if cfg.ConfigFileExists() { - if err := cfg.LoadWithRootdir(true); err != nil { - return errors.Wrap("failed to load config", err) - } - } else { - if err := cfg.LoadWithRootdir(false); err != nil { - return errors.Wrap("failed to load config", err) - } - } - return nil + PersistentPreRunE: func(cmd *cobra.Command, args []string) error { + return loadConfig(cfg) }, } diff --git a/cli/rpc.go b/cli/rpc.go deleted file mode 100644 index afb1a007e2..0000000000 --- a/cli/rpc.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package cli - -import ( - "context" - - "github.com/spf13/cobra" - - "github.com/sourcenetwork/defradb/config" -) - -func MakeRPCCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "rpc", - Short: "Interact with a DefraDB node via RPC", - Long: "Interact with a DefraDB node via RPC.", - } - cmd.PersistentFlags().String( - "addr", cfg.Net.RPCAddress, - "RPC endpoint address", - ) - - if err := cfg.BindFlag("net.rpcaddress", cmd.PersistentFlags().Lookup("addr")); err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind net.rpcaddress", err) - } - return cmd -} diff --git a/cli/schema_add.go b/cli/schema_add.go index b5f28f15d3..b93427a883 100644 --- a/cli/schema_add.go +++ b/cli/schema_add.go @@ -11,21 +11,14 @@ package cli import ( - "encoding/json" + "fmt" "io" - "net/http" "os" - "strings" "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" ) -func MakeSchemaAddCommand(cfg *config.Config) *cobra.Command { +func MakeSchemaAddCommand() *cobra.Command { var schemaFile string var cmd = &cobra.Command{ Use: "add [schema]", @@ -42,117 +35,34 @@ Example: add from stdin: cat schema.graphql | defradb client schema add - Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - var schema string - fi, err := os.Stdin.Stat() - if err != nil { - return err - } - - if len(args) > 1 { - if err = cmd.Usage(); err != nil { - return err - } - return errors.New("too many arguments") - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - if schemaFile != "" { - buf, err := os.ReadFile(schemaFile) - if err != nil { - return errors.Wrap("failed to read schema file", err) - } - schema = string(buf) - } else if isFileInfoPipe(fi) && (len(args) == 0 || args[0] != "-") { - log.FeedbackInfo( - cmd.Context(), - "Run 'defradb client schema add -' to read from stdin."+ - " Example: 'cat schema.graphql | defradb client schema add -').", - ) - return nil - } else if len(args) == 0 { - err := cmd.Help() + var schema string + switch { + case schemaFile != "": + data, err := os.ReadFile(schemaFile) if err != nil { - return errors.Wrap("failed to print help", err) + return err } - return nil - } else if args[0] == "-" { - stdin, err := readStdin() + schema = 
string(data) + case len(args) > 0 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) if err != nil { - return errors.Wrap("failed to read stdin", err) - } - if len(stdin) == 0 { - return errors.New("no schema in stdin provided") - } else { - schema = stdin + return err } - } else { + schema = string(data) + case len(args) > 0: schema = args[0] + default: + return fmt.Errorf("schema cannot be empty") } - if schema == "" { - return errors.New("empty schema provided") - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) + cols, err := store.AddSchema(cmd.Context(), schema) if err != nil { - return errors.Wrap("join paths failed", err) - } - - res, err := http.Post(endpoint.String(), "text", strings.NewReader(schema)) - if err != nil { - return errors.Wrap("failed to post schema", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - graphlErr, err := hasGraphQLErrors(response) - if err != nil { - return errors.Wrap("failed to handle GraphQL errors", err) - } - if graphlErr { - indentedResult, err := indentJSON(response) - if err != nil { - return errors.Wrap("failed to pretty print result", err) - } - log.FeedbackError(cmd.Context(), indentedResult) - } else { - type schemaResponse struct { - Data struct { - Result string `json:"result"` - Collections []struct { - Name string `json:"name"` - ID string `json:"id"` - } `json:"collections"` - } `json:"data"` - } - r := schemaResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return errors.Wrap("failed to unmarshal response", err) - } - if r.Data.Result == "success" { - log.FeedbackInfo(cmd.Context(), "Successfully added schema.", logging.NewKV("Collections", r.Data.Collections)) - } - log.FeedbackInfo(cmd.Context(), r.Data.Result) - } + return err } - return nil + return writeJSON(cmd, cols) }, } cmd.Flags().StringVarP(&schemaFile, "file", "f", "", "File to load a schema from") diff --git a/cli/schema_describe.go b/cli/schema_describe.go new file mode 100644 index 0000000000..72d8eda474 --- /dev/null +++ b/cli/schema_describe.go @@ -0,0 +1,82 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" +) + +func MakeSchemaDescribeCommand() *cobra.Command { + var name string + var root string + var versionID string + + var cmd = &cobra.Command{ + Use: "describe", + Short: "View schema descriptions.", + Long: `Introspect schema types. 
+ +Example: view all schemas + defradb client schema describe + +Example: view schemas by name + defradb client schema describe --name User + +Example: view schemas by root + defradb client schema describe --root bae123 + +Example: view a single schema by version id + defradb client schema describe --version bae123 + `, + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + var schemas []client.SchemaDescription + switch { + case versionID != "": + schema, err := store.GetSchemaByVersionID(cmd.Context(), versionID) + if err != nil { + return err + } + return writeJSON(cmd, schema) + + case root != "": + s, err := store.GetSchemasByRoot(cmd.Context(), root) + if err != nil { + return err + } + schemas = s + + case name != "": + s, err := store.GetSchemasByName(cmd.Context(), name) + if err != nil { + return err + } + schemas = s + + default: + s, err := store.GetAllSchemas(cmd.Context()) + if err != nil { + return err + } + schemas = s + } + + return writeJSON(cmd, schemas) + }, + } + cmd.PersistentFlags().StringVar(&name, "name", "", "Schema name") + cmd.PersistentFlags().StringVar(&root, "root", "", "Schema root") + cmd.PersistentFlags().StringVar(&versionID, "version", "", "Schema Version ID") + return cmd +} diff --git a/cli/schema_list.go b/cli/schema_list.go deleted file mode 100644 index 3a0e32bcce..0000000000 --- a/cli/schema_list.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
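// One behavioral nuance of the describe command above: --version writes a
// single schema object, while --root, --name, and the default case write an
// array, so scripts consuming the output must handle both shapes. A small
// illustration (the field names here are placeholders, not the exact
// client.SchemaDescription fields):
package main

import (
	"encoding/json"
	"fmt"
)

type schemaDesc struct {
	Name      string `json:"Name"`
	VersionID string `json:"VersionID"`
}

func main() {
	one, _ := json.Marshal(schemaDesc{Name: "User", VersionID: "bae123"})
	many, _ := json.Marshal([]schemaDesc{{Name: "User", VersionID: "bae123"}})
	fmt.Println(string(one))  // --version: a single object
	fmt.Println(string(many)) // --name/--root/default: an array
}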
- -package cli - -import ( - "encoding/json" - "io" - "net/http" - - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" -) - -type schemaListResponse struct { - Data struct { - Collections []struct { - Name string `json:"name"` - ID string `json:"id"` - VersionID string `json:"version_id"` - Fields []struct { - ID string `json:"id"` - Name string `json:"name"` - Kind string `json:"kind"` - Internal bool `json:"internal"` - } `json:"fields"` - } `json:"collections"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` -} - -func MakeSchemaListCommand(cfg *config.Config) *cobra.Command { - var cmd = &cobra.Command{ - Use: "list", - Short: "List schema types with their respective fields", - RunE: func(cmd *cobra.Command, args []string) (err error) { - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) - if err != nil { - return NewErrFailedToJoinEndpoint(err) - } - - res, err := http.Get(endpoint.String()) - if err != nil { - return NewErrFailedToSendRequest(err) - } - defer res.Body.Close() //nolint:errcheck - - data, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - var r schemaListResponse - if err := json.Unmarshal(data, &r); err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - return errors.New("failed to list schemas", errors.NewKV("errors", r.Errors)) - } - - for _, c := range r.Data.Collections { - cmd.Printf("# Schema ID: %s\n", c.ID) - cmd.Printf("# Version ID: %s\n", c.VersionID) - cmd.Printf("type %s {\n", c.Name) - for _, f := range c.Fields { - if !f.Internal { - cmd.Printf("\t%s: %s\n", f.Name, f.Kind) - } - } - cmd.Printf("}\n\n") - } - - return nil - }, - } - return cmd -} diff --git a/cli/schema_migration_down.go b/cli/schema_migration_down.go new file mode 100644 index 0000000000..1dcb5e64da --- /dev/null +++ b/cli/schema_migration_down.go @@ -0,0 +1,91 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + "io" + "os" + + "github.com/sourcenetwork/immutable/enumerable" + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeSchemaMigrationDownCommand() *cobra.Command { + var file string + var schemaVersionID string + var cmd = &cobra.Command{ + Use: "down --version ", + Short: "Reverses the migration from the specified schema version.", + Long: `Reverses the migration from the specified schema version. +Documents is a list of documents to reverse the migration from. 
+ +Example: migrate from string + defradb client schema migration down --version bae123 '[{"name": "Bob"}]' + +Example: migrate from file + defradb client schema migration down --version bae123 -f documents.json + +Example: migrate from stdin + cat documents.json | defradb client schema migration down --version bae123 - + `, + Args: cobra.RangeArgs(0, 1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + var srcData []byte + switch { + case file != "": + data, err := os.ReadFile(file) + if err != nil { + return err + } + srcData = data + case len(args) == 1 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) + if err != nil { + return err + } + srcData = data + case len(args) == 1: + srcData = []byte(args[0]) + default: + return ErrNoDocOrFile + } + + var src []map[string]any + if err := json.Unmarshal(srcData, &src); err != nil { + return err + } + lens := store.LensRegistry() + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + lens = lens.WithTxn(tx) + } + out, err := lens.MigrateDown(cmd.Context(), enumerable.New(src), schemaVersionID) + if err != nil { + return err + } + var value []map[string]any + err = enumerable.ForEach(out, func(item map[string]any) { + value = append(value, item) + }) + if err != nil { + return err + } + return writeJSON(cmd, value) + }, + } + cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") + cmd.Flags().StringVar(&schemaVersionID, "version", "", "Schema version id") + return cmd +} diff --git a/cli/schema_migration_get.go b/cli/schema_migration_get.go index 333c2d9cf4..43b66599b7 100644 --- a/cli/schema_migration_get.go +++ b/cli/schema_migration_get.go @@ -11,21 +11,10 @@ package cli import ( - "encoding/json" - "io" - "net/http" - "os" - "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" ) -func MakeSchemaMigrationGetCommand(cfg *config.Config) *cobra.Command { +func MakeSchemaMigrationGetCommand() *cobra.Command { var cmd = &cobra.Command{ Use: "get", Short: "Gets the schema migrations within DefraDB", @@ -35,63 +24,14 @@ Example: defradb client schema migration get' Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - if err := cobra.NoArgs(cmd, args); err != nil { - return NewErrTooManyArgs(0, len(args)) - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaMigrationPath) - if err != nil { - return errors.Wrap("join paths failed", err) - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - res, err := http.Get(endpoint.String()) + cfgs, err := store.LensRegistry().Config(cmd.Context()) if err != nil { - return errors.Wrap("failed to get schema migrations", err) + return err } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - type migrationGetResponse struct { - Data struct { - Configuration []client.LensConfig 
`json:"configuration"` - } `json:"data"` - Errors []struct { - Message string `json:"message"` - } `json:"errors"` - } - r := migrationGetResponse{} - err = json.Unmarshal(response, &r) - log.FeedbackInfo(cmd.Context(), string(response)) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to get schema migrations", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully got schema migrations", - logging.NewKV("Configuration", r.Data.Configuration)) - } - } - - return nil + return writeJSON(cmd, cfgs) }, } return cmd diff --git a/cli/schema_migration_reload.go b/cli/schema_migration_reload.go new file mode 100644 index 0000000000..d04aebed65 --- /dev/null +++ b/cli/schema_migration_reload.go @@ -0,0 +1,35 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeSchemaMigrationReloadCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "reload", + Short: "Reload the schema migrations within DefraDB", + Long: `Reload the schema migrations within DefraDB`, + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + lens := store.LensRegistry() + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + lens = lens.WithTxn(tx) + } + return lens.ReloadLenses(cmd.Context()) + }, + } + return cmd +} diff --git a/cli/schema_migration_set.go b/cli/schema_migration_set.go index 633cbf0115..280130b8db 100644 --- a/cli/schema_migration_set.go +++ b/cli/schema_migration_set.go @@ -13,21 +13,16 @@ package cli import ( "encoding/json" "io" - "net/http" "os" "strings" "github.com/lens-vm/lens/host-go/config/model" "github.com/spf13/cobra" - httpapi "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" ) -func MakeSchemaMigrationSetCommand(cfg *config.Config) *cobra.Command { +func MakeSchemaMigrationSetCommand() *cobra.Command { var lensFile string var cmd = &cobra.Command{ Use: "set [src] [dst] [cfg]", @@ -44,73 +39,39 @@ Example: add from stdin: cat schema_migration.lens | defradb client schema migration set bae123 bae456 - Learn more about the DefraDB GraphQL Schema Language on https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - if err := cobra.MinimumNArgs(2)(cmd, args); err != nil { - return NewErrMissingArgs([]string{"src", "dst", "cfg"}) - } - if err := cobra.MaximumNArgs(3)(cmd, args); err != nil { - return NewErrTooManyArgs(3, len(args)) - } + Args: cobra.RangeArgs(2, 3), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) var lensCfgJson string - var srcSchemaVersionID string - var dstSchemaVersionID string - fi, err := os.Stdin.Stat() - if err != nil { - return err - } - - if lensFile != "" { - buf, err := os.ReadFile(lensFile) + switch { + case lensFile != "": + data, err := os.ReadFile(lensFile) if err != nil { - return 
errors.Wrap("failed to read schema file", err) + return err } - lensCfgJson = string(buf) - } else if len(args) == 2 { - // If the lensFile flag has not been provided then it must be provided as an arg - // and thus len(args) cannot be 2 - return NewErrMissingArg("cfg") - } else if isFileInfoPipe(fi) && args[2] != "-" { - log.FeedbackInfo( - cmd.Context(), - "Run 'defradb client schema migration set -' to read from stdin."+ - " Example: 'cat schema_migration.lens | defradb client schema migration set -').", - ) - return nil - } else if args[2] == "-" { - stdin, err := readStdin() + lensCfgJson = string(data) + case len(args) == 3 && args[2] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) if err != nil { - return errors.Wrap("failed to read stdin", err) + return err } - if len(stdin) == 0 { - return errors.New("no lens cfg in stdin provided") - } else { - lensCfgJson = stdin - } - } else { + lensCfgJson = string(data) + case len(args) == 3: lensCfgJson = args[2] + default: + return ErrNoLensConfig } - srcSchemaVersionID = args[0] - dstSchemaVersionID = args[1] - - if lensCfgJson == "" { - return NewErrMissingArg("cfg") - } - if srcSchemaVersionID == "" { - return NewErrMissingArg("src") - } - if dstSchemaVersionID == "" { - return NewErrMissingArg("dst") - } + srcSchemaVersionID := args[0] + dstSchemaVersionID := args[1] decoder := json.NewDecoder(strings.NewReader(lensCfgJson)) decoder.DisallowUnknownFields() var lensCfg model.Lens - err = decoder.Decode(&lensCfg) - if err != nil { - return errors.Wrap("invalid lens configuration", err) + if err := decoder.Decode(&lensCfg); err != nil { + return NewErrInvalidLensConfig(err) } migrationCfg := client.LensConfig{ @@ -119,58 +80,7 @@ Learn more about the DefraDB GraphQL Schema Language on https://docs.source.netw Lens: lensCfg, } - migrationCfgJson, err := json.Marshal(migrationCfg) - if err != nil { - return errors.Wrap("failed to marshal cfg", err) - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaMigrationPath) - if err != nil { - return errors.Wrap("join paths failed", err) - } - - res, err := http.Post(endpoint.String(), "application/json", strings.NewReader(string(migrationCfgJson))) - if err != nil { - return errors.Wrap("failed to post schema migration", err) - } - - defer func() { - if e := res.Body.Close(); e != nil { - err = NewErrFailedToCloseResponseBody(e, err) - } - }() - - response, err := io.ReadAll(res.Body) - if err != nil { - return errors.Wrap("failed to read response body", err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return errors.Wrap("failed to stat stdout", err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - type migrationSetResponse struct { - Errors []struct { - Message string `json:"message"` - } `json:"errors"` - } - r := migrationSetResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - if len(r.Errors) > 0 { - log.FeedbackError(cmd.Context(), "Failed to set schema migration", - logging.NewKV("Errors", r.Errors)) - } else { - log.FeedbackInfo(cmd.Context(), "Successfully set schema migration") - } - } - - return nil + return store.LensRegistry().SetMigration(cmd.Context(), migrationCfg) }, } cmd.Flags().StringVarP(&lensFile, "file", "f", "", "Lens configuration file") diff --git a/cli/schema_migration_up.go b/cli/schema_migration_up.go new file mode 100644 index 0000000000..3b0b522349 --- /dev/null +++ b/cli/schema_migration_up.go @@ -0,0 +1,91 @@ +// Copyright 2023 
Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "encoding/json" + "io" + "os" + + "github.com/sourcenetwork/immutable/enumerable" + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeSchemaMigrationUpCommand() *cobra.Command { + var file string + var schemaVersionID string + var cmd = &cobra.Command{ + Use: "up --version ", + Short: "Applies the migration to the specified schema version.", + Long: `Applies the migration to the specified schema version. +Documents is a list of documents to apply the migration to. + +Example: migrate from string + defradb client schema migration up --version bae123 '[{"name": "Bob"}]' + +Example: migrate from file + defradb client schema migration up --version bae123 -f documents.json + +Example: migrate from stdin + cat documents.json | defradb client schema migration up --version bae123 - + `, + Args: cobra.RangeArgs(0, 1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + + var srcData []byte + switch { + case file != "": + data, err := os.ReadFile(file) + if err != nil { + return err + } + srcData = data + case len(args) == 1 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) + if err != nil { + return err + } + srcData = data + case len(args) == 1: + srcData = []byte(args[0]) + default: + return ErrNoDocOrFile + } + + var src []map[string]any + if err := json.Unmarshal(srcData, &src); err != nil { + return err + } + lens := store.LensRegistry() + if tx, ok := cmd.Context().Value(txContextKey).(datastore.Txn); ok { + lens = lens.WithTxn(tx) + } + out, err := lens.MigrateUp(cmd.Context(), enumerable.New(src), schemaVersionID) + if err != nil { + return err + } + var value []map[string]any + err = enumerable.ForEach(out, func(item map[string]any) { + value = append(value, item) + }) + if err != nil { + return err + } + return writeJSON(cmd, value) + }, + } + cmd.Flags().StringVarP(&file, "file", "f", "", "File containing document(s)") + cmd.Flags().StringVar(&schemaVersionID, "version", "", "Schema version id") + return cmd +} diff --git a/cli/schema_patch.go b/cli/schema_patch.go index b1e962c51a..70f4283c85 100644 --- a/cli/schema_patch.go +++ b/cli/schema_patch.go @@ -11,21 +11,16 @@ package cli import ( - "encoding/json" + "fmt" "io" - "net/http" "os" - "strings" "github.com/spf13/cobra" - - httpapi "github.com/sourcenetwork/defradb/api/http" - "github.com/sourcenetwork/defradb/config" ) -func MakeSchemaPatchCommand(cfg *config.Config) *cobra.Command { +func MakeSchemaPatchCommand() *cobra.Command { var patchFile string - + var setDefault bool var cmd = &cobra.Command{ Use: "patch [schema]", Short: "Patch an existing schema type", @@ -43,113 +38,33 @@ Example: patch from stdin: cat patch.json | defradb client schema patch - To learn more about the DefraDB GraphQL Schema Language, refer to https://docs.source.network.`, - RunE: func(cmd *cobra.Command, args []string) (err error) { - var patch string - fi, err := os.Stdin.Stat() - if err != nil { - return err - } + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) - if len(args) > 1 { - if err = cmd.Usage(); err != nil { - 
return err - } - return NewErrTooManyArgs(1, len(args)) - } - - if patchFile != "" { - buf, err := os.ReadFile(patchFile) + var patch string + switch { + case patchFile != "": + data, err := os.ReadFile(patchFile) if err != nil { - return NewFailedToReadFile(err) + return err } - patch = string(buf) - } else if isFileInfoPipe(fi) && (len(args) == 0 || args[0] != "-") { - log.FeedbackInfo( - cmd.Context(), - "Run 'defradb client schema patch -' to read from stdin."+ - " Example: 'cat patch.json | defradb client schema patch -').", - ) - return nil - } else if len(args) == 0 { - // ignore error, nothing we can do about it - // as printing an error about failing to print help - // is useless - //nolint:errcheck - cmd.Help() - return nil - } else if args[0] == "-" { - stdin, err := readStdin() + patch = string(data) + case len(args) > 0 && args[0] == "-": + data, err := io.ReadAll(cmd.InOrStdin()) if err != nil { - return NewFailedToReadStdin(err) - } - if len(stdin) == 0 { - return ErrEmptyStdin - } else { - patch = stdin + return err } - } else { + patch = string(data) + case len(args) > 0: patch = args[0] + default: + return fmt.Errorf("patch cannot be empty") } - if patch == "" { - return ErrEmptyFile - } - - endpoint, err := httpapi.JoinPaths(cfg.API.AddressToURL(), httpapi.SchemaPath) - if err != nil { - return err - } - - req, err := http.NewRequest(http.MethodPatch, endpoint.String(), strings.NewReader(patch)) - if err != nil { - return NewErrFailedToSendRequest(err) - } - res, err := http.DefaultClient.Do(req) - if err != nil { - return NewErrFailedToSendRequest(err) - } - - //nolint:errcheck - defer res.Body.Close() - response, err := io.ReadAll(res.Body) - if err != nil { - return NewErrFailedToReadResponseBody(err) - } - - stdout, err := os.Stdout.Stat() - if err != nil { - return NewErrFailedToStatStdOut(err) - } - if isFileInfoPipe(stdout) { - cmd.Println(string(response)) - } else { - graphlErr, err := hasGraphQLErrors(response) - if err != nil { - return NewErrFailedToHandleGQLErrors(err) - } - if graphlErr { - indentedResult, err := indentJSON(response) - if err != nil { - return NewErrFailedToPrettyPrintResponse(err) - } - log.FeedbackError(cmd.Context(), indentedResult) - } else { - type schemaResponse struct { - Data struct { - Result string `json:"result"` - } `json:"data"` - } - r := schemaResponse{} - err = json.Unmarshal(response, &r) - if err != nil { - return NewErrFailedToUnmarshalResponse(err) - } - log.FeedbackInfo(cmd.Context(), r.Data.Result) - } - } - return nil + return store.PatchSchema(cmd.Context(), patch, setDefault) }, } + cmd.Flags().BoolVar(&setDefault, "set-default", false, "Set default schema version") cmd.Flags().StringVarP(&patchFile, "file", "f", "", "File to load a patch from") return cmd } diff --git a/cli/schema_set_default.go b/cli/schema_set_default.go new file mode 100644 index 0000000000..cdb6bd8bd8 --- /dev/null +++ b/cli/schema_set_default.go @@ -0,0 +1,29 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
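// The migration up and down commands above move documents through the lens
// registry as enumerables rather than plain slices. This self-contained
// sketch shows only the enumerable plumbing they use to feed documents in
// and collect results out; no lens transform is applied here, so the
// identity pass-through stands in for lens.MigrateUp/MigrateDown.
package main

import (
	"fmt"

	"github.com/sourcenetwork/immutable/enumerable"
)

func main() {
	src := []map[string]any{{"name": "Bob"}}
	out := enumerable.New(src) // stands in for the migration output

	var value []map[string]any
	if err := enumerable.ForEach(out, func(item map[string]any) {
		value = append(value, item)
	}); err != nil {
		panic(err)
	}
	fmt.Println(value)
}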
+ +package cli + +import ( + "github.com/spf13/cobra" +) + +func MakeSchemaSetDefaultCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "set-default [versionID]", + Short: "Set the default schema version", + Long: `Set the default schema version`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) error { + store := mustGetStoreContext(cmd) + return store.SetDefaultSchemaVersion(cmd.Context(), args[0]) + }, + } + return cmd +} diff --git a/cli/serverdump.go b/cli/server_dump.go similarity index 100% rename from cli/serverdump.go rename to cli/server_dump.go diff --git a/cli/start.go b/cli/start.go index 9185af8c92..62cc281dbf 100644 --- a/cli/start.go +++ b/cli/start.go @@ -13,34 +13,30 @@ package cli import ( "context" "fmt" - gonet "net" "net/http" "os" "os/signal" + "path/filepath" "strings" "syscall" - badger "github.com/dgraph-io/badger/v4" - grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - grpc_recovery "github.com/grpc-ecosystem/go-grpc-middleware/recovery" - ma "github.com/multiformats/go-multiaddr" + badger "github.com/sourcenetwork/badger/v4" "github.com/spf13/cobra" - "google.golang.org/grpc" - "google.golang.org/grpc/keepalive" - httpapi "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" ds "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" + httpapi "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" - netpb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" ) +const badgerDatastoreName = "badger" + func MakeStartCommand(cfg *config.Config) *cobra.Command { var cmd = &cobra.Command{ Use: "start", @@ -48,27 +44,11 @@ func MakeStartCommand(cfg *config.Config) *cobra.Command { Long: "Start a DefraDB node.", // Load the root config if it exists, otherwise create it. 
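+		// A sketch of the flow implemented below, using the helpers added
+		// in cli/utils.go:
+		//
+		//	loadConfig(cfg)    // resolve the rootdir and read the config file if present
+		//	createConfig(cfg)  // first run only: write the default config to disk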
PersistentPreRunE: func(cmd *cobra.Command, _ []string) error { - if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { + if err := loadConfig(cfg); err != nil { return err } - if cfg.ConfigFileExists() { - if err := cfg.LoadWithRootdir(true); err != nil { - return config.NewErrLoadingConfig(err) - } - log.FeedbackInfo(cmd.Context(), fmt.Sprintf("Configuration loaded from DefraDB directory %v", cfg.Rootdir)) - } else { - if err := cfg.LoadWithRootdir(false); err != nil { - return config.NewErrLoadingConfig(err) - } - if config.FolderExists(cfg.Rootdir) { - if err := cfg.WriteConfigFile(); err != nil { - return err - } - } else { - if err := cfg.CreateRootDirAndConfigFile(); err != nil { - return err - } - } + if !cfg.ConfigFileExists() { + return createConfig(cfg) } return nil }, @@ -127,15 +107,6 @@ func MakeStartCommand(cfg *config.Config) *cobra.Command { log.FeedbackFatalE(context.Background(), "Could not bind net.p2paddress", err) } - cmd.Flags().String( - "tcpaddr", cfg.Net.TCPAddress, - "Listener address for the tcp gRPC server (formatted as a libp2p MultiAddr)", - ) - err = cfg.BindFlag("net.tcpaddress", cmd.Flags().Lookup("tcpaddr")) - if err != nil { - log.FeedbackFatalE(context.Background(), "Could not bind net.tcpaddress", err) - } - cmd.Flags().Bool( "no-p2p", cfg.Net.P2PDisabled, "Disable the peer-to-peer network synchronization system", @@ -200,15 +171,10 @@ type defraInstance struct { func (di *defraInstance) close(ctx context.Context) { if di.node != nil { - if err := di.node.Close(); err != nil { - log.FeedbackInfo( - ctx, - "The node could not be closed successfully", - logging.NewKV("Error", err.Error()), - ) - } + di.node.Close() + } else { + di.db.Close() } - di.db.Close(ctx) if err := di.server.Close(); err != nil { log.FeedbackInfo( ctx, @@ -251,16 +217,26 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { } // init the p2p node - var n *net.Node + var node *net.Node if !cfg.Net.P2PDisabled { - log.FeedbackInfo(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) - n, err = net.NewNode( - ctx, - db, + nodeOpts := []net.NodeOpt{ net.WithConfig(cfg), - ) + } + if cfg.Datastore.Store == badgerDatastoreName { + // It would be ideal to not have the key path tied to the datastore. + // Running with memory store mode will always generate a random key. + // Adding support for an ephemeral mode and moving the key to the + // config would solve both of these issues. + key, err := loadOrGeneratePrivateKey(filepath.Join(cfg.Rootdir, "data", "key")) + if err != nil { + return nil, err + } + nodeOpts = append(nodeOpts, net.WithPrivateKey(key)) + } + log.FeedbackInfo(ctx, "Starting P2P node", logging.NewKV("P2P address", cfg.Net.P2PAddress)) + node, err = net.NewNode(ctx, db, nodeOpts...) 
if err != nil { - db.Close(ctx) + db.Close() return nil, errors.Wrap("failed to start P2P node", err) } @@ -272,55 +248,13 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { return nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", cfg.Net.Peers), err) } log.Debug(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) - n.Boostrap(addrs) + node.Bootstrap(addrs) } - if err := n.Start(); err != nil { - if e := n.Close(); e != nil { - err = errors.Wrap(fmt.Sprintf("failed to close node: %v", e.Error()), err) - } - db.Close(ctx) + if err := node.Start(); err != nil { + node.Close() return nil, errors.Wrap("failed to start P2P listeners", err) } - - MtcpAddr, err := ma.NewMultiaddr(cfg.Net.TCPAddress) - if err != nil { - return nil, errors.Wrap("failed to parse multiaddress", err) - } - addr, err := netutils.TCPAddrFromMultiAddr(MtcpAddr) - if err != nil { - return nil, errors.Wrap("failed to parse TCP address", err) - } - - rpcTimeoutDuration, err := cfg.Net.RPCTimeoutDuration() - if err != nil { - return nil, errors.Wrap("failed to parse RPC timeout duration", err) - } - - server := grpc.NewServer( - grpc.UnaryInterceptor( - grpc_middleware.ChainUnaryServer( - grpc_recovery.UnaryServerInterceptor(), - ), - ), - grpc.KeepaliveParams( - keepalive.ServerParameters{ - MaxConnectionIdle: rpcTimeoutDuration, - }, - ), - ) - tcplistener, err := gonet.Listen("tcp", addr) - if err != nil { - return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", addr), err) - } - - go func() { - log.FeedbackInfo(ctx, "Started RPC server", logging.NewKV("Address", addr)) - netpb.RegisterCollectionServer(server, n.Peer) - if err := server.Serve(tcplistener); err != nil && !errors.Is(err, grpc.ErrServerStopped) { - log.FeedbackFatalE(ctx, "Failed to start RPC server", err) - } - }() } sOpt := []func(*httpapi.Server){ @@ -329,10 +263,6 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { httpapi.WithAllowedOrigins(cfg.API.AllowedOrigins...), } - if n != nil { - sOpt = append(sOpt, httpapi.WithPeerID(n.PeerID().String())) - } - if cfg.API.TLS { sOpt = append( sOpt, @@ -342,41 +272,39 @@ func start(ctx context.Context, cfg *config.Config) (*defraInstance, error) { ) } - s := httpapi.NewServer(db, sOpt...) - if err := s.Listen(ctx); err != nil { - return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", s.Addr), err) + var server *httpapi.Server + if node != nil { + server, err = httpapi.NewServer(node, sOpt...) + } else { + server, err = httpapi.NewServer(db, sOpt...) + } + if err != nil { + return nil, errors.Wrap("failed to create http server", err) + } + if err := server.Listen(ctx); err != nil { + return nil, errors.Wrap(fmt.Sprintf("failed to listen on TCP address %v", server.Addr), err) } // save the address on the config in case the port number was set to random - cfg.API.Address = s.AssignedAddr() + cfg.API.Address = server.AssignedAddr() // run the server in a separate goroutine go func() { - log.FeedbackInfo( - ctx, - fmt.Sprintf( - "Providing HTTP API at %s%s. 
Use the GraphQL request endpoint at %s%s/graphql ", - cfg.API.AddressToURL(), - httpapi.RootPath, - cfg.API.AddressToURL(), - httpapi.RootPath, - ), - ) - if err := s.Run(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { + log.FeedbackInfo(ctx, fmt.Sprintf("Providing HTTP API at %s.", cfg.API.AddressToURL())) + if err := server.Run(ctx); err != nil && !errors.Is(err, http.ErrServerClosed) { log.FeedbackErrorE(ctx, "Failed to run the HTTP server", err) - if n != nil { - if err := n.Close(); err != nil { - log.FeedbackErrorE(ctx, "Failed to close node", err) - } + if node != nil { + node.Close() + } else { + db.Close() } - db.Close(ctx) os.Exit(1) } }() return &defraInstance{ - node: n, + node: node, db: db, - server: s, + server: server, }, nil } diff --git a/api/http/http.go b/cli/tx.go similarity index 51% rename from api/http/http.go rename to cli/tx.go index 3ac3d62bdd..b4d278df6d 100644 --- a/api/http/http.go +++ b/cli/tx.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2023 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. @@ -8,11 +8,18 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. -/* -Package http provides DefraDB's HTTP API, offering various capabilities. -*/ -package http +package cli -import "github.com/sourcenetwork/defradb/logging" +import ( + "github.com/spf13/cobra" +) -var log = logging.MustNewLogger("http") +func MakeTxCommand() *cobra.Command { + var cmd = &cobra.Command{ + Use: "tx", + Short: "Create, commit, and discard DefraDB transactions", + Long: `Create, commit, and discard DefraDB transactions`, + } + + return cmd +} diff --git a/cli/tx_commit.go b/cli/tx_commit.go new file mode 100644 index 0000000000..260a274a08 --- /dev/null +++ b/cli/tx_commit.go @@ -0,0 +1,41 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "strconv" + + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/http" +) + +func MakeTxCommitCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "commit [id]", + Short: "Commit a DefraDB transaction.", + Long: `Commit a DefraDB transaction.`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + id, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return err + } + tx, err := http.NewTransaction(cfg.API.Address, id) + if err != nil { + return err + } + return tx.Commit(cmd.Context()) + }, + } + return cmd +} diff --git a/cli/tx_create.go b/cli/tx_create.go new file mode 100644 index 0000000000..987a784077 --- /dev/null +++ b/cli/tx_create.go @@ -0,0 +1,46 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
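+
+// Hypothetical session tying the tx subcommands together (output shape is
+// illustrative only):
+//
+//	$ defradb client tx create
+//	{"id": 1}
+//	$ defradb client tx commit 1
+//	$ defradb client tx discard 1   # alternative to commit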
+ +package cli + +import ( + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/datastore" +) + +func MakeTxCreateCommand(cfg *config.Config) *cobra.Command { + var concurrent bool + var readOnly bool + var cmd = &cobra.Command{ + Use: "create", + Short: "Create a new DefraDB transaction.", + Long: `Create a new DefraDB transaction.`, + RunE: func(cmd *cobra.Command, args []string) (err error) { + db := cmd.Context().Value(dbContextKey).(client.DB) + + var tx datastore.Txn + if concurrent { + tx, err = db.NewConcurrentTxn(cmd.Context(), readOnly) + } else { + tx, err = db.NewTxn(cmd.Context(), readOnly) + } + if err != nil { + return err + } + return writeJSON(cmd, map[string]any{"id": tx.ID()}) + }, + } + cmd.Flags().BoolVar(&concurrent, "concurrent", false, "Transaction is concurrent") + cmd.Flags().BoolVar(&readOnly, "read-only", false, "Transaction is read only") + return cmd +} diff --git a/cli/tx_discard.go b/cli/tx_discard.go new file mode 100644 index 0000000000..351f919f53 --- /dev/null +++ b/cli/tx_discard.go @@ -0,0 +1,42 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "strconv" + + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/http" +) + +func MakeTxDiscardCommand(cfg *config.Config) *cobra.Command { + var cmd = &cobra.Command{ + Use: "discard [id]", + Short: "Discard a DefraDB transaction.", + Long: `Discard a DefraDB transaction.`, + Args: cobra.ExactArgs(1), + RunE: func(cmd *cobra.Command, args []string) (err error) { + id, err := strconv.ParseUint(args[0], 10, 64) + if err != nil { + return err + } + tx, err := http.NewTransaction(cfg.API.Address, id) + if err != nil { + return err + } + tx.Discard(cmd.Context()) + return nil + }, + } + return cmd +} diff --git a/cli/utils.go b/cli/utils.go new file mode 100644 index 0000000000..8c1a40dc1f --- /dev/null +++ b/cli/utils.go @@ -0,0 +1,157 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "context" + "encoding/json" + "os" + + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/spf13/cobra" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/http" +) + +type contextKey string + +var ( + // txContextKey is the context key for the datastore.Txn + // + // This will only be set if a transaction id is specified. + txContextKey = contextKey("tx") + // dbContextKey is the context key for the client.DB + dbContextKey = contextKey("db") + // storeContextKey is the context key for the client.Store + // + // If a transaction exists, all operations will be executed + // in the current transaction context. 
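+	//
+	// A command handler typically resolves it as (sketch, mirroring
+	// mustGetStoreContext below):
+	//
+	//	store := cmd.Context().Value(storeContextKey).(client.Store)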
+ storeContextKey = contextKey("store") + // colContextKey is the context key for the client.Collection + // + // If a transaction exists, all operations will be executed + // in the current transaction context. + colContextKey = contextKey("col") +) + +// mustGetStoreContext returns the store for the current command context. +// +// If a store is not set in the current context this function panics. +func mustGetStoreContext(cmd *cobra.Command) client.Store { + return cmd.Context().Value(storeContextKey).(client.Store) +} + +// mustGetP2PContext returns the p2p implementation for the current command context. +// +// If a p2p implementation is not set in the current context this function panics. +func mustGetP2PContext(cmd *cobra.Command) client.P2P { + return cmd.Context().Value(dbContextKey).(client.P2P) +} + +// tryGetCollectionContext returns the collection for the current command context +// and a boolean indicating if the collection was set. +func tryGetCollectionContext(cmd *cobra.Command) (client.Collection, bool) { + col, ok := cmd.Context().Value(colContextKey).(client.Collection) + return col, ok +} + +// setTransactionContext sets the transaction for the current command context. +func setTransactionContext(cmd *cobra.Command, cfg *config.Config, txId uint64) error { + if txId == 0 { + return nil + } + tx, err := http.NewTransaction(cfg.API.Address, txId) + if err != nil { + return err + } + ctx := context.WithValue(cmd.Context(), txContextKey, tx) + cmd.SetContext(ctx) + return nil +} + +// setStoreContext sets the store for the current command context. +func setStoreContext(cmd *cobra.Command, cfg *config.Config) error { + db, err := http.NewClient(cfg.API.Address) + if err != nil { + return err + } + ctx := context.WithValue(cmd.Context(), dbContextKey, db) + if tx, ok := ctx.Value(txContextKey).(datastore.Txn); ok { + ctx = context.WithValue(ctx, storeContextKey, db.WithTxn(tx)) + } else { + ctx = context.WithValue(ctx, storeContextKey, db) + } + cmd.SetContext(ctx) + return nil +} + +// loadConfig loads the root directory and, if a configuration file exists there, +// loads the configuration; otherwise it falls back to the default configuration. +func loadConfig(cfg *config.Config) error { + if err := cfg.LoadRootDirFromFlagOrDefault(); err != nil { + return err + } + return cfg.LoadWithRootdir(cfg.ConfigFileExists()) +} + +// createConfig creates the config directories and writes +// the current config to a file. +func createConfig(cfg *config.Config) error { + if config.FolderExists(cfg.Rootdir) { + return cfg.WriteConfigFile() + } + return cfg.CreateRootDirAndConfigFile() +} + +// loadOrGeneratePrivateKey loads the private key from the given path +// or generates a new key and writes it to a file at the given path. +func loadOrGeneratePrivateKey(path string) (crypto.PrivKey, error) { + key, err := loadPrivateKey(path) + if err == nil { + return key, nil + } + if os.IsNotExist(err) { + return generatePrivateKey(path) + } + return nil, err +} + +// generatePrivateKey generates a new private key and writes it +// to a file at the given path. +func generatePrivateKey(path string) (crypto.PrivKey, error) { + key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) + if err != nil { + return nil, err + } + data, err := crypto.MarshalPrivateKey(key) + if err != nil { + return nil, err + } + return key, os.WriteFile(path, data, 0644) +} + +// loadPrivateKey reads the private key from the file at the given path.
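+//
+// Round-trip sketch using the helpers above (the path is illustrative):
+//
+//	key, _ := loadOrGeneratePrivateKey("data/key") // generates and persists on first run
+//	same, _ := loadPrivateKey("data/key")          // loads the persisted key afterwards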
+func loadPrivateKey(path string) (crypto.PrivKey, error) { + data, err := os.ReadFile(path) + if err != nil { + return nil, err + } + return crypto.UnmarshalPrivateKey(data) +} + +func writeJSON(cmd *cobra.Command, out any) error { + enc := json.NewEncoder(cmd.OutOrStdout()) + enc.SetIndent("", " ") + return enc.Encode(out) +} diff --git a/cli/version.go b/cli/version.go index 8842697699..f61ecbc9d5 100644 --- a/cli/version.go +++ b/cli/version.go @@ -11,9 +11,7 @@ package cli import ( - "bytes" - - "encoding/json" + "strings" "github.com/spf13/cobra" @@ -31,25 +29,17 @@ func MakeVersionCommand() *cobra.Command { if err != nil { return err } - switch format { - case "json": - var buf bytes.Buffer - dvj, err := json.Marshal(dv) - if err != nil { - return err - } - err = json.Indent(&buf, dvj, "", " ") - if err != nil { - return err - } - cmd.Println(buf.String()) - default: - if full { - cmd.Println(dv.StringFull()) - } else { - cmd.Println(dv.String()) - } + + if strings.ToLower(format) == "json" { + return writeJSON(cmd, dv) } + + if full { + cmd.Println(dv.StringFull()) + } else { + cmd.Println(dv.String()) + } + return nil }, } diff --git a/client/collection.go b/client/collection.go index 9c91dccb7c..3e6bb64cc4 100644 --- a/client/collection.go +++ b/client/collection.go @@ -16,6 +16,14 @@ import ( "github.com/sourcenetwork/defradb/datastore" ) +// CollectionDefinition contains the metadata defining what a Collection is. +type CollectionDefinition struct { + // Description returns the CollectionDescription of this Collection. + Description CollectionDescription `json:"description"` + // Schema returns the SchemaDescription used to define this Collection. + Schema SchemaDescription `json:"schema"` +} + // Collection represents a defradb collection. // // A Collection is mostly analogous to a SQL table, however a collection is specific to its @@ -23,16 +31,19 @@ import ( // // Many functions on this object will interact with the underlying datastores. type Collection interface { - // Description returns the CollectionDescription of this Collection. - Description() CollectionDescription // Name returns the name of this collection. Name() string - // Schema returns the SchemaDescription used to define this Collection. - Schema() SchemaDescription // ID returns the ID of this Collection. ID() uint32 - // SchemaID returns the ID of the Schema used to define this Collection. - SchemaID() string + // SchemaRoot returns the Root of the Schema used to define this Collection. + SchemaRoot() string + + // Definition contains the metadata defining what a Collection is. + Definition() CollectionDefinition + // Schema returns the SchemaDescription used to define this Collection. + Schema() SchemaDescription + // Description returns the CollectionDescription of this Collection. + Description() CollectionDescription // Create a new document. // diff --git a/client/db.go b/client/db.go index ba4dd0b89d..b1b63f29d6 100644 --- a/client/db.go +++ b/client/db.go @@ -51,6 +51,11 @@ type DB interface { // It sits within the rootstore returned by [Root]. Blockstore() blockstore.Blockstore + // Peerstore returns the peerstore where known host information is stored. + // + // It sits within the rootstore returned by [Root]. + Peerstore() datastore.DSBatching + // Close closes the database instance and releases any resources held. 
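+	// A typical shutdown is simply (sketch):
+	//
+	//	defer db.Close()
+	//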
// // The behaviour of other functions in this package after this function has been called is undefined @@ -60,7 +65,7 @@ type DB interface { // be created after calling this to resume operations on the prior data - this is however dependant on // the behaviour of the rootstore provided on database instance creation, as this function will Close // the provided rootstore. - Close(context.Context) + Close() // Events returns the database event queue. // @@ -82,9 +87,6 @@ type DB interface { // Store contains the core DefraDB read-write operations. type Store interface { - // P2P holds the P2P related methods that must be implemented by the database. - P2P - // Backup holds the backup related methods that must be implemented by the database. Backup @@ -95,8 +97,9 @@ type Store interface { // types previously defined. AddSchema(context.Context, string) ([]CollectionDescription, error) - // PatchSchema takes the given JSON patch string and applies it to the set of CollectionDescriptions - // present in the database. + // PatchSchema takes the given JSON patch string and applies it to the set of SchemaDescriptions + // present in the database. If true is provided, the new schema versions will be made default, otherwise + // [SetDefaultSchemaVersion] should be called to set them as the default. // // It will also update the GQL types used by the query system. It will error and not apply any of the // requested, valid updates should the net result of the patch result in an invalid state. The @@ -109,7 +112,16 @@ type Store interface { // // Field [FieldKind] values may be provided in either their raw integer form, or as string as per // [FieldKindStringToEnumMapping]. - PatchSchema(context.Context, string) error + PatchSchema(context.Context, string, bool) error + + // SetDefaultSchemaVersion sets the default schema version to the ID provided. It will be applied to all + // collections using the schema. + // + // This will affect all operations interacting with the schema where a schema version is not explicitly + // provided. This includes GQL queries and Collection operations. + // + // It will return an error if the provided schema version ID does not exist. + SetDefaultSchemaVersion(context.Context, string) error // SetMigration sets the migration for the given source-destination schema version IDs. Is equivilent to // calling `LensRegistry().SetMigration(ctx, cfg)`. @@ -135,20 +147,36 @@ type Store interface { // If no matching collection is found an error will be returned. GetCollectionByName(context.Context, CollectionName) (Collection, error) - // GetCollectionBySchemaID attempts to retrieve a collection matching the given schema ID. + // GetCollectionsBySchemaRoot attempts to retrieve all collections using the given schema root. // - // If no matching collection is found an error will be returned. - GetCollectionBySchemaID(context.Context, string) (Collection, error) + // If no matching collection is found an empty set will be returned. + GetCollectionsBySchemaRoot(context.Context, string) ([]Collection, error) - // GetCollectionBySchemaID attempts to retrieve a collection matching the given schema version ID. + // GetCollectionsByVersionID attempts to retrieve all collections using the given schema version ID. // - // If no matching collection is found an error will be returned. - GetCollectionByVersionID(context.Context, string) (Collection, error) + // If no matching collections are found an empty set will be returned.
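+	//
+	// Usage sketch:
+	//
+	//	cols, err := store.GetCollectionsByVersionID(ctx, versionID)
+	//	// err == nil with len(cols) == 0 means no collection uses that version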
+ GetCollectionsByVersionID(context.Context, string) ([]Collection, error) // GetAllCollections returns all the collections and their descriptions that currently exist within // this [Store]. GetAllCollections(context.Context) ([]Collection, error) + // GetSchemasByName returns all schema versions with the given name. + GetSchemasByName(context.Context, string) ([]SchemaDescription, error) + + // GetSchemaByVersionID returns the schema description for the schema version with the + // ID provided. + // + // Will return an error if it is not found. + GetSchemaByVersionID(context.Context, string) (SchemaDescription, error) + + // GetSchemasByRoot returns all schema versions for the given root. + GetSchemasByRoot(context.Context, string) ([]SchemaDescription, error) + + // GetAllSchemas returns all schema versions that currently exist within + // this [Store]. + GetAllSchemas(context.Context) ([]SchemaDescription, error) + // GetAllIndexes returns all the indexes that currently exist within this [Store]. GetAllIndexes(context.Context) (map[CollectionName][]IndexDescription, error) diff --git a/client/descriptions.go b/client/descriptions.go index 0b44f36b83..efe470114d 100644 --- a/client/descriptions.go +++ b/client/descriptions.go @@ -27,8 +27,8 @@ type CollectionDescription struct { // It is immutable. ID uint32 - // Schema contains the data type information that this Collection uses. - Schema SchemaDescription + // The ID of the schema version that this collection is at. + SchemaVersionID string // Indexes contains the secondary indexes that this Collection has. Indexes []IndexDescription @@ -41,24 +41,36 @@ func (col CollectionDescription) IDString() string { // GetFieldByID searches for a field with the given ID. If such a field is found it // will return it and true, if it is not found it will return false. -func (col CollectionDescription) GetFieldByID(id FieldID) (FieldDescription, bool) { - if !col.Schema.IsEmpty() { - for _, field := range col.Schema.Fields { - if field.ID == id { - return field, true - } +func (col CollectionDescription) GetFieldByID(id FieldID, schema *SchemaDescription) (FieldDescription, bool) { + for _, field := range schema.Fields { + if field.ID == id { + return field, true } } return FieldDescription{}, false } -// GetRelation returns the field that supports the relation of the given name. -func (col CollectionDescription) GetRelation(name string) (FieldDescription, bool) { - if !col.Schema.IsEmpty() { - for _, field := range col.Schema.Fields { - if field.RelationName == name { - return field, true - } +// GetFieldByName returns the field for the given field name. If such a field is found it +// will return it and true, if it is not found it will return false. +func (col CollectionDescription) GetFieldByName(fieldName string, schema *SchemaDescription) (FieldDescription, bool) { + for _, field := range schema.Fields { + if field.Name == fieldName { + return field, true + } + } + return FieldDescription{}, false +} + +// GetFieldByRelation returns the field that supports the relation of the given name.
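+// The other collection and field names identify the far side of the relation so
+// it can be excluded, e.g. (sketch, with hypothetical names):
+//
+//	field, ok := bookDesc.GetFieldByRelation("author_book", "Author", "written", &bookSchema)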
+func (col CollectionDescription) GetFieldByRelation( + relationName string, + otherCollectionName string, + otherFieldName string, + schema *SchemaDescription, +) (FieldDescription, bool) { + for _, field := range schema.Fields { + if field.RelationName == relationName && !(col.Name == otherCollectionName && otherFieldName == field.Name) { + return field, true } } return FieldDescription{}, false @@ -66,10 +78,10 @@ func (col CollectionDescription) GetRelation(name string) (FieldDescription, boo // SchemaDescription describes a Schema and its associated metadata. type SchemaDescription struct { - // SchemaID is the version agnostic identifier for this schema. + // Root is the version agnostic identifier for this schema. // // It remains constant throughout the lifetime of this schema. - SchemaID string + Root string // VersionID is the version-specific identifier for this schema. // @@ -91,28 +103,11 @@ type SchemaDescription struct { Fields []FieldDescription } -// IsEmpty returns true if the SchemaDescription is empty and uninitialized -func (sd SchemaDescription) IsEmpty() bool { - return len(sd.Fields) == 0 -} - -// GetFieldKey returns the field ID for the given field name. -func (sd SchemaDescription) GetFieldKey(fieldName string) uint32 { - for _, field := range sd.Fields { - if field.Name == fieldName { - return uint32(field.ID) - } - } - return uint32(0) -} - // GetField returns the field of the given name. func (sd SchemaDescription) GetField(name string) (FieldDescription, bool) { - if !sd.IsEmpty() { - for _, field := range sd.Fields { - if field.Name == name { - return field, true - } + for _, field := range sd.Fields { + if field.Name == name { + return field, true } } return FieldDescription{}, false diff --git a/client/document.go b/client/document.go index c48ccfce88..bcb8ae6070 100644 --- a/client/document.go +++ b/client/document.go @@ -398,6 +398,26 @@ func (doc *Document) ToMap() (map[string]any, error) { return doc.toMapWithKey() } +// ToJSONPatch returns a json patch that can be used to update +// a document by calling SetWithJSON. +func (doc *Document) ToJSONPatch() ([]byte, error) { + docMap, err := doc.toMap() + if err != nil { + return nil, err + } + + for field, value := range doc.Values() { + if !value.IsDirty() { + delete(docMap, field.Name()) + } + if value.IsDelete() { + docMap[field.Name()] = nil + } + } + + return json.Marshal(docMap) +} + // Clean cleans the document by removing all dirty fields. func (doc *Document) Clean() { for _, v := range doc.Fields() { diff --git a/client/errors.go b/client/errors.go index ad1ad0027a..048d96c00d 100644 --- a/client/errors.go +++ b/client/errors.go @@ -23,6 +23,7 @@ const ( errUninitializeProperty string = "invalid state, required property is uninitialized" errMaxTxnRetries string = "reached maximum transaction reties" errRelationOneSided string = "relation must be defined on both schemas" + errCollectionNotFound string = "collection not found" ) // Errors returnable from this package. @@ -45,6 +46,7 @@ var ( ErrInvalidDocKeyVersion = errors.New("invalid DocKey version") ErrMaxTxnRetries = errors.New(errMaxTxnRetries) ErrRelationOneSided = errors.New(errRelationOneSided) + ErrCollectionNotFound = errors.New(errCollectionNotFound) ) // NewErrFieldNotExist returns an error indicating that the given field does not exist. 
@@ -107,3 +109,17 @@ func NewErrRelationOneSided(fieldName string, typeName string) error { errors.NewKV("Type", typeName), ) } + +func NewErrCollectionNotFoundForSchemaVersion(schemaVersionID string) error { + return errors.New( + errCollectionNotFound, + errors.NewKV("SchemaVersionID", schemaVersionID), + ) +} + +func NewErrCollectionNotFoundForSchema(schemaRoot string) error { + return errors.New( + errCollectionNotFound, + errors.NewKV("SchemaRoot", schemaRoot), + ) +} diff --git a/client/index.go b/client/index.go index 47b52f00c5..69f0362017 100644 --- a/client/index.go +++ b/client/index.go @@ -37,3 +37,22 @@ type IndexDescription struct { // Fields contains the fields that are being indexed. Fields []IndexedFieldDescription } + +// CollectIndexedFields returns all fields that are indexed by all collection indexes. +func (d CollectionDescription) CollectIndexedFields(schema *SchemaDescription) []FieldDescription { + fieldsMap := make(map[string]bool) + fields := make([]FieldDescription, 0, len(d.Indexes)) + for _, index := range d.Indexes { + for _, field := range index.Fields { + for i := range schema.Fields { + colField := schema.Fields[i] + if field.Name == colField.Name && !fieldsMap[field.Name] { + fieldsMap[field.Name] = true + fields = append(fields, colField) + break + } + } + } + } + return fields +} diff --git a/client/mocks/collection.go b/client/mocks/collection.go index 16d052c337..f3d7f58354 100644 --- a/client/mocks/collection.go +++ b/client/mocks/collection.go @@ -164,6 +164,47 @@ func (_c *Collection_CreateMany_Call) RunAndReturn(run func(context.Context, []* return _c } +// Definition provides a mock function with given fields: +func (_m *Collection) Definition() client.CollectionDefinition { + ret := _m.Called() + + var r0 client.CollectionDefinition + if rf, ok := ret.Get(0).(func() client.CollectionDefinition); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.CollectionDefinition) + } + + return r0 +} + +// Collection_Definition_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Definition' +type Collection_Definition_Call struct { + *mock.Call +} + +// Definition is a helper method to define mock.On call +func (_e *Collection_Expecter) Definition() *Collection_Definition_Call { + return &Collection_Definition_Call{Call: _e.mock.On("Definition")} +} + +func (_c *Collection_Definition_Call) Run(run func()) *Collection_Definition_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Collection_Definition_Call) Return(_a0 client.CollectionDefinition) *Collection_Definition_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Collection_Definition_Call) RunAndReturn(run func() client.CollectionDefinition) *Collection_Definition_Call { + _c.Call.Return(run) + return _c +} + // Delete provides a mock function with given fields: _a0, _a1 func (_m *Collection) Delete(_a0 context.Context, _a1 client.DocKey) (bool, error) { ret := _m.Called(_a0, _a1) @@ -904,8 +945,8 @@ func (_c *Collection_Schema_Call) RunAndReturn(run func() client.SchemaDescripti return _c } -// SchemaID provides a mock function with given fields: -func (_m *Collection) SchemaID() string { +// SchemaRoot provides a mock function with given fields: +func (_m *Collection) SchemaRoot() string { ret := _m.Called() var r0 string @@ -918,29 +959,29 @@ func (_m *Collection) SchemaID() string { return r0 } -// Collection_SchemaID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'SchemaID' -type Collection_SchemaID_Call struct { +// Collection_SchemaRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SchemaRoot' +type Collection_SchemaRoot_Call struct { *mock.Call } -// SchemaID is a helper method to define mock.On call -func (_e *Collection_Expecter) SchemaID() *Collection_SchemaID_Call { - return &Collection_SchemaID_Call{Call: _e.mock.On("SchemaID")} +// SchemaRoot is a helper method to define mock.On call +func (_e *Collection_Expecter) SchemaRoot() *Collection_SchemaRoot_Call { + return &Collection_SchemaRoot_Call{Call: _e.mock.On("SchemaRoot")} } -func (_c *Collection_SchemaID_Call) Run(run func()) *Collection_SchemaID_Call { +func (_c *Collection_SchemaRoot_Call) Run(run func()) *Collection_SchemaRoot_Call { _c.Call.Run(func(args mock.Arguments) { run() }) return _c } -func (_c *Collection_SchemaID_Call) Return(_a0 string) *Collection_SchemaID_Call { +func (_c *Collection_SchemaRoot_Call) Return(_a0 string) *Collection_SchemaRoot_Call { _c.Call.Return(_a0) return _c } -func (_c *Collection_SchemaID_Call) RunAndReturn(run func() string) *Collection_SchemaID_Call { +func (_c *Collection_SchemaRoot_Call) RunAndReturn(run func() string) *Collection_SchemaRoot_Call { _c.Call.Return(run) return _c } diff --git a/client/mocks/db.go b/client/mocks/db.go index cb0af26193..df7b53fb5a 100644 --- a/client/mocks/db.go +++ b/client/mocks/db.go @@ -28,49 +28,6 @@ func (_m *DB) EXPECT() *DB_Expecter { return &DB_Expecter{mock: &_m.Mock} } -// AddP2PCollection provides a mock function with given fields: ctx, collectionID -func (_m *DB) AddP2PCollection(ctx context.Context, collectionID string) error { - ret := _m.Called(ctx, collectionID) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { - r0 = rf(ctx, collectionID) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DB_AddP2PCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AddP2PCollection' -type DB_AddP2PCollection_Call struct { - *mock.Call -} - -// AddP2PCollection is a helper method to define mock.On call -// - ctx context.Context -// - collectionID string -func (_e *DB_Expecter) AddP2PCollection(ctx interface{}, collectionID interface{}) *DB_AddP2PCollection_Call { - return &DB_AddP2PCollection_Call{Call: _e.mock.On("AddP2PCollection", ctx, collectionID)} -} - -func (_c *DB_AddP2PCollection_Call) Run(run func(ctx context.Context, collectionID string)) *DB_AddP2PCollection_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) - }) - return _c -} - -func (_c *DB_AddP2PCollection_Call) Return(_a0 error) *DB_AddP2PCollection_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *DB_AddP2PCollection_Call) RunAndReturn(run func(context.Context, string) error) *DB_AddP2PCollection_Call { - _c.Call.Return(run) - return _c -} - // AddSchema provides a mock function with given fields: _a0, _a1 func (_m *DB) AddSchema(_a0 context.Context, _a1 string) ([]client.CollectionDescription, error) { ret := _m.Called(_a0, _a1) @@ -255,9 +212,9 @@ func (_c *DB_Blockstore_Call) RunAndReturn(run func() blockstore.Blockstore) *DB return _c } -// Close provides a mock function with given fields: _a0 -func (_m *DB) Close(_a0 context.Context) { - _m.Called(_a0) +// Close provides a mock function with given fields: +func (_m *DB) Close() { + _m.Called() } // DB_Close_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'Close' @@ -266,14 +223,13 @@ type DB_Close_Call struct { } // Close is a helper method to define mock.On call -// - _a0 context.Context -func (_e *DB_Expecter) Close(_a0 interface{}) *DB_Close_Call { - return &DB_Close_Call{Call: _e.mock.On("Close", _a0)} +func (_e *DB_Expecter) Close() *DB_Close_Call { + return &DB_Close_Call{Call: _e.mock.On("Close")} } -func (_c *DB_Close_Call) Run(run func(_a0 context.Context)) *DB_Close_Call { +func (_c *DB_Close_Call) Run(run func()) *DB_Close_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + run() }) return _c } @@ -283,50 +239,7 @@ func (_c *DB_Close_Call) Return() *DB_Close_Call { return _c } -func (_c *DB_Close_Call) RunAndReturn(run func(context.Context)) *DB_Close_Call { - _c.Call.Return(run) - return _c -} - -// DeleteReplicator provides a mock function with given fields: ctx, rep -func (_m *DB) DeleteReplicator(ctx context.Context, rep client.Replicator) error { - ret := _m.Called(ctx, rep) - - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, client.Replicator) error); ok { - r0 = rf(ctx, rep) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// DB_DeleteReplicator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteReplicator' -type DB_DeleteReplicator_Call struct { - *mock.Call -} - -// DeleteReplicator is a helper method to define mock.On call -// - ctx context.Context -// - rep client.Replicator -func (_e *DB_Expecter) DeleteReplicator(ctx interface{}, rep interface{}) *DB_DeleteReplicator_Call { - return &DB_DeleteReplicator_Call{Call: _e.mock.On("DeleteReplicator", ctx, rep)} -} - -func (_c *DB_DeleteReplicator_Call) Run(run func(ctx context.Context, rep client.Replicator)) *DB_DeleteReplicator_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(client.Replicator)) - }) - return _c -} - -func (_c *DB_DeleteReplicator_Call) Return(_a0 error) *DB_DeleteReplicator_Call { - _c.Call.Return(_a0) - return _c -} - -func (_c *DB_DeleteReplicator_Call) RunAndReturn(run func(context.Context, client.Replicator) error) *DB_DeleteReplicator_Call { +func (_c *DB_Close_Call) RunAndReturn(run func()) *DB_Close_Call { _c.Call.Return(run) return _c } @@ -525,25 +438,25 @@ func (_c *DB_GetAllIndexes_Call) RunAndReturn(run func(context.Context) (map[str return _c } -// GetAllP2PCollections provides a mock function with given fields: ctx -func (_m *DB) GetAllP2PCollections(ctx context.Context) ([]string, error) { - ret := _m.Called(ctx) +// GetAllSchemas provides a mock function with given fields: _a0 +func (_m *DB) GetAllSchemas(_a0 context.Context) ([]client.SchemaDescription, error) { + ret := _m.Called(_a0) - var r0 []string + var r0 []client.SchemaDescription var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]string, error)); ok { - return rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context) ([]client.SchemaDescription, error)); ok { + return rf(_a0) } - if rf, ok := ret.Get(0).(func(context.Context) []string); ok { - r0 = rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context) []client.SchemaDescription); ok { + r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]string) + r0 = ret.Get(0).([]client.SchemaDescription) } } if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) + r1 = rf(_a0) } else { r1 = ret.Error(1) } @@ -551,53 +464,53 @@ func (_m *DB) GetAllP2PCollections(ctx context.Context) ([]string, error) { return r0, r1 } -// DB_GetAllP2PCollections_Call is a 
*mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllP2PCollections' -type DB_GetAllP2PCollections_Call struct { +// DB_GetAllSchema_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllSchemas' +type DB_GetAllSchema_Call struct { *mock.Call } -// GetAllP2PCollections is a helper method to define mock.On call -// - ctx context.Context -func (_e *DB_Expecter) GetAllP2PCollections(ctx interface{}) *DB_GetAllP2PCollections_Call { - return &DB_GetAllP2PCollections_Call{Call: _e.mock.On("GetAllP2PCollections", ctx)} +// GetAllSchemas is a helper method to define mock.On call +// - _a0 context.Context +func (_e *DB_Expecter) GetAllSchemas(_a0 interface{}) *DB_GetAllSchema_Call { + return &DB_GetAllSchema_Call{Call: _e.mock.On("GetAllSchemas", _a0)} } -func (_c *DB_GetAllP2PCollections_Call) Run(run func(ctx context.Context)) *DB_GetAllP2PCollections_Call { +func (_c *DB_GetAllSchema_Call) Run(run func(_a0 context.Context)) *DB_GetAllSchema_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context)) }) return _c } -func (_c *DB_GetAllP2PCollections_Call) Return(_a0 []string, _a1 error) *DB_GetAllP2PCollections_Call { +func (_c *DB_GetAllSchema_Call) Return(_a0 []client.SchemaDescription, _a1 error) *DB_GetAllSchema_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *DB_GetAllP2PCollections_Call) RunAndReturn(run func(context.Context) ([]string, error)) *DB_GetAllP2PCollections_Call { +func (_c *DB_GetAllSchema_Call) RunAndReturn(run func(context.Context) ([]client.SchemaDescription, error)) *DB_GetAllSchema_Call { _c.Call.Return(run) return _c } -// GetAllReplicators provides a mock function with given fields: ctx -func (_m *DB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - ret := _m.Called(ctx) +// GetCollectionByName provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollectionByName(_a0 context.Context, _a1 string) (client.Collection, error) { + ret := _m.Called(_a0, _a1) - var r0 []client.Replicator + var r0 client.Collection var r1 error - if rf, ok := ret.Get(0).(func(context.Context) ([]client.Replicator, error)); ok { - return rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(context.Context) []client.Replicator); ok { - r0 = rf(ctx) + if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]client.Replicator) + r0 = ret.Get(0).(client.Collection) } } - if rf, ok := ret.Get(1).(func(context.Context) error); ok { - r1 = rf(ctx) + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) } @@ -605,48 +518,49 @@ func (_m *DB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error return r0, r1 } -// DB_GetAllReplicators_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAllReplicators' -type DB_GetAllReplicators_Call struct { +// DB_GetCollectionByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionByName' +type DB_GetCollectionByName_Call struct { *mock.Call } -// GetAllReplicators is a helper method to define mock.On call -// - ctx context.Context -func (_e *DB_Expecter) GetAllReplicators(ctx interface{}) *DB_GetAllReplicators_Call { - return &DB_GetAllReplicators_Call{Call: 
_e.mock.On("GetAllReplicators", ctx)} +// GetCollectionByName is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) GetCollectionByName(_a0 interface{}, _a1 interface{}) *DB_GetCollectionByName_Call { + return &DB_GetCollectionByName_Call{Call: _e.mock.On("GetCollectionByName", _a0, _a1)} } -func (_c *DB_GetAllReplicators_Call) Run(run func(ctx context.Context)) *DB_GetAllReplicators_Call { +func (_c *DB_GetCollectionByName_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionByName_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + run(args[0].(context.Context), args[1].(string)) }) return _c } -func (_c *DB_GetAllReplicators_Call) Return(_a0 []client.Replicator, _a1 error) *DB_GetAllReplicators_Call { +func (_c *DB_GetCollectionByName_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionByName_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *DB_GetAllReplicators_Call) RunAndReturn(run func(context.Context) ([]client.Replicator, error)) *DB_GetAllReplicators_Call { +func (_c *DB_GetCollectionByName_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionByName_Call { _c.Call.Return(run) return _c } -// GetCollectionByName provides a mock function with given fields: _a0, _a1 -func (_m *DB) GetCollectionByName(_a0 context.Context, _a1 string) (client.Collection, error) { +// GetCollectionsBySchemaRoot provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollectionsBySchemaRoot(_a0 context.Context, _a1 string) ([]client.Collection, error) { ret := _m.Called(_a0, _a1) - var r0 client.Collection + var r0 []client.Collection var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.Collection, error)); ok { return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) []client.Collection); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(client.Collection) + r0 = ret.Get(0).([]client.Collection) } } @@ -659,49 +573,49 @@ func (_m *DB) GetCollectionByName(_a0 context.Context, _a1 string) (client.Colle return r0, r1 } -// DB_GetCollectionByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionByName' -type DB_GetCollectionByName_Call struct { +// DB_GetCollectionsBySchemaRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionsBySchemaRoot' +type DB_GetCollectionsBySchemaRoot_Call struct { *mock.Call } -// GetCollectionByName is a helper method to define mock.On call +// GetCollectionsBySchemaRoot is a helper method to define mock.On call // - _a0 context.Context // - _a1 string -func (_e *DB_Expecter) GetCollectionByName(_a0 interface{}, _a1 interface{}) *DB_GetCollectionByName_Call { - return &DB_GetCollectionByName_Call{Call: _e.mock.On("GetCollectionByName", _a0, _a1)} +func (_e *DB_Expecter) GetCollectionsBySchemaRoot(_a0 interface{}, _a1 interface{}) *DB_GetCollectionsBySchemaRoot_Call { + return &DB_GetCollectionsBySchemaRoot_Call{Call: _e.mock.On("GetCollectionsBySchemaRoot", _a0, _a1)} } -func (_c *DB_GetCollectionByName_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionByName_Call { +func (_c *DB_GetCollectionsBySchemaRoot_Call) Run(run func(_a0 
context.Context, _a1 string)) *DB_GetCollectionsBySchemaRoot_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(string)) }) return _c } -func (_c *DB_GetCollectionByName_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionByName_Call { +func (_c *DB_GetCollectionsBySchemaRoot_Call) Return(_a0 []client.Collection, _a1 error) *DB_GetCollectionsBySchemaRoot_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *DB_GetCollectionByName_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionByName_Call { +func (_c *DB_GetCollectionsBySchemaRoot_Call) RunAndReturn(run func(context.Context, string) ([]client.Collection, error)) *DB_GetCollectionsBySchemaRoot_Call { _c.Call.Return(run) return _c } -// GetCollectionBySchemaID provides a mock function with given fields: _a0, _a1 -func (_m *DB) GetCollectionBySchemaID(_a0 context.Context, _a1 string) (client.Collection, error) { +// GetCollectionsByVersionID provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetCollectionsByVersionID(_a0 context.Context, _a1 string) ([]client.Collection, error) { ret := _m.Called(_a0, _a1) - var r0 client.Collection + var r0 []client.Collection var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.Collection, error)); ok { return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) []client.Collection); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(client.Collection) + r0 = ret.Get(0).([]client.Collection) } } @@ -714,49 +628,49 @@ func (_m *DB) GetCollectionBySchemaID(_a0 context.Context, _a1 string) (client.C return r0, r1 } -// DB_GetCollectionBySchemaID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionBySchemaID' -type DB_GetCollectionBySchemaID_Call struct { +// DB_GetCollectionsByVersionID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionsByVersionID' +type DB_GetCollectionsByVersionID_Call struct { *mock.Call } -// GetCollectionBySchemaID is a helper method to define mock.On call +// GetCollectionsByVersionID is a helper method to define mock.On call // - _a0 context.Context // - _a1 string -func (_e *DB_Expecter) GetCollectionBySchemaID(_a0 interface{}, _a1 interface{}) *DB_GetCollectionBySchemaID_Call { - return &DB_GetCollectionBySchemaID_Call{Call: _e.mock.On("GetCollectionBySchemaID", _a0, _a1)} +func (_e *DB_Expecter) GetCollectionsByVersionID(_a0 interface{}, _a1 interface{}) *DB_GetCollectionsByVersionID_Call { + return &DB_GetCollectionsByVersionID_Call{Call: _e.mock.On("GetCollectionsByVersionID", _a0, _a1)} } -func (_c *DB_GetCollectionBySchemaID_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionBySchemaID_Call { +func (_c *DB_GetCollectionsByVersionID_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionsByVersionID_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(string)) }) return _c } -func (_c *DB_GetCollectionBySchemaID_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionBySchemaID_Call { +func (_c *DB_GetCollectionsByVersionID_Call) Return(_a0 []client.Collection, _a1 error) *DB_GetCollectionsByVersionID_Call { _c.Call.Return(_a0, _a1) return _c } 
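+
+// Hypothetical test usage of the regenerated mock (sketch; NewDB is the
+// mockery-generated constructor and mock is testify's mock package):
+//
+//	db := mocks.NewDB(t)
+//	db.EXPECT().GetCollectionsBySchemaRoot(mock.Anything, "someRoot").Return(nil, nil)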
-func (_c *DB_GetCollectionBySchemaID_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionBySchemaID_Call { +func (_c *DB_GetCollectionsByVersionID_Call) RunAndReturn(run func(context.Context, string) ([]client.Collection, error)) *DB_GetCollectionsByVersionID_Call { _c.Call.Return(run) return _c } -// GetCollectionByVersionID provides a mock function with given fields: _a0, _a1 -func (_m *DB) GetCollectionByVersionID(_a0 context.Context, _a1 string) (client.Collection, error) { +// GetSchemasByName provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetSchemasByName(_a0 context.Context, _a1 string) ([]client.SchemaDescription, error) { ret := _m.Called(_a0, _a1) - var r0 client.Collection + var r0 []client.SchemaDescription var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (client.Collection, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.SchemaDescription, error)); ok { return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(context.Context, string) client.Collection); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) []client.SchemaDescription); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(client.Collection) + r0 = ret.Get(0).([]client.SchemaDescription) } } @@ -769,31 +683,139 @@ func (_m *DB) GetCollectionByVersionID(_a0 context.Context, _a1 string) (client. return r0, r1 } -// DB_GetCollectionByVersionID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCollectionByVersionID' -type DB_GetCollectionByVersionID_Call struct { +// DB_GetSchemaByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSchemasByName' +type DB_GetSchemaByName_Call struct { *mock.Call } -// GetCollectionByVersionID is a helper method to define mock.On call +// GetSchemasByName is a helper method to define mock.On call // - _a0 context.Context // - _a1 string -func (_e *DB_Expecter) GetCollectionByVersionID(_a0 interface{}, _a1 interface{}) *DB_GetCollectionByVersionID_Call { - return &DB_GetCollectionByVersionID_Call{Call: _e.mock.On("GetCollectionByVersionID", _a0, _a1)} +func (_e *DB_Expecter) GetSchemasByName(_a0 interface{}, _a1 interface{}) *DB_GetSchemaByName_Call { + return &DB_GetSchemaByName_Call{Call: _e.mock.On("GetSchemasByName", _a0, _a1)} } -func (_c *DB_GetCollectionByVersionID_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetCollectionByVersionID_Call { +func (_c *DB_GetSchemaByName_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetSchemaByName_Call { _c.Call.Run(func(args mock.Arguments) { run(args[0].(context.Context), args[1].(string)) }) return _c } -func (_c *DB_GetCollectionByVersionID_Call) Return(_a0 client.Collection, _a1 error) *DB_GetCollectionByVersionID_Call { +func (_c *DB_GetSchemaByName_Call) Return(_a0 []client.SchemaDescription, _a1 error) *DB_GetSchemaByName_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *DB_GetCollectionByVersionID_Call) RunAndReturn(run func(context.Context, string) (client.Collection, error)) *DB_GetCollectionByVersionID_Call { +func (_c *DB_GetSchemaByName_Call) RunAndReturn(run func(context.Context, string) ([]client.SchemaDescription, error)) *DB_GetSchemaByName_Call { + _c.Call.Return(run) + return _c +} + +// GetSchemasByRoot provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetSchemasByRoot(_a0 context.Context, _a1 string) ([]client.SchemaDescription, error) { + 
ret := _m.Called(_a0, _a1) + + var r0 []client.SchemaDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]client.SchemaDescription, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []client.SchemaDescription); ok { + r0 = rf(_a0, _a1) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]client.SchemaDescription) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetSchemaByRoot_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSchemasByRoot' +type DB_GetSchemaByRoot_Call struct { + *mock.Call +} + +// GetSchemasByRoot is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) GetSchemasByRoot(_a0 interface{}, _a1 interface{}) *DB_GetSchemaByRoot_Call { + return &DB_GetSchemaByRoot_Call{Call: _e.mock.On("GetSchemasByRoot", _a0, _a1)} +} + +func (_c *DB_GetSchemaByRoot_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetSchemaByRoot_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_GetSchemaByRoot_Call) Return(_a0 []client.SchemaDescription, _a1 error) *DB_GetSchemaByRoot_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetSchemaByRoot_Call) RunAndReturn(run func(context.Context, string) ([]client.SchemaDescription, error)) *DB_GetSchemaByRoot_Call { + _c.Call.Return(run) + return _c +} + +// GetSchemaByVersionID provides a mock function with given fields: _a0, _a1 +func (_m *DB) GetSchemaByVersionID(_a0 context.Context, _a1 string) (client.SchemaDescription, error) { + ret := _m.Called(_a0, _a1) + + var r0 client.SchemaDescription + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (client.SchemaDescription, error)); ok { + return rf(_a0, _a1) + } + if rf, ok := ret.Get(0).(func(context.Context, string) client.SchemaDescription); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(client.SchemaDescription) + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(_a0, _a1) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DB_GetSchemaByVersionID_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSchemaByVersionID' +type DB_GetSchemaByVersionID_Call struct { + *mock.Call +} + +// GetSchemaByVersionID is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 string +func (_e *DB_Expecter) GetSchemaByVersionID(_a0 interface{}, _a1 interface{}) *DB_GetSchemaByVersionID_Call { + return &DB_GetSchemaByVersionID_Call{Call: _e.mock.On("GetSchemaByVersionID", _a0, _a1)} +} + +func (_c *DB_GetSchemaByVersionID_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_GetSchemaByVersionID_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *DB_GetSchemaByVersionID_Call) Return(_a0 client.SchemaDescription, _a1 error) *DB_GetSchemaByVersionID_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *DB_GetSchemaByVersionID_Call) RunAndReturn(run func(context.Context, string) (client.SchemaDescription, error)) *DB_GetSchemaByVersionID_Call { _c.Call.Return(run) return _c } @@ -992,13 +1014,13 @@ func (_c *DB_NewTxn_Call) RunAndReturn(run func(context.Context, bool) (datastor return _c } -// 
PatchSchema provides a mock function with given fields: _a0, _a1 -func (_m *DB) PatchSchema(_a0 context.Context, _a1 string) error { - ret := _m.Called(_a0, _a1) +// PatchSchema provides a mock function with given fields: _a0, _a1, _a2 +func (_m *DB) PatchSchema(_a0 context.Context, _a1 string, _a2 bool) error { + ret := _m.Called(_a0, _a1, _a2) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { - r0 = rf(_a0, _a1) + if rf, ok := ret.Get(0).(func(context.Context, string, bool) error); ok { + r0 = rf(_a0, _a1, _a2) } else { r0 = ret.Error(0) } @@ -1014,13 +1036,14 @@ type DB_PatchSchema_Call struct { // PatchSchema is a helper method to define mock.On call // - _a0 context.Context // - _a1 string -func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}) *DB_PatchSchema_Call { - return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1)} +// - _a2 bool +func (_e *DB_Expecter) PatchSchema(_a0 interface{}, _a1 interface{}, _a2 interface{}) *DB_PatchSchema_Call { + return &DB_PatchSchema_Call{Call: _e.mock.On("PatchSchema", _a0, _a1, _a2)} } -func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_PatchSchema_Call { +func (_c *DB_PatchSchema_Call) Run(run func(_a0 context.Context, _a1 string, _a2 bool)) *DB_PatchSchema_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) + run(args[0].(context.Context), args[1].(string), args[2].(bool)) }) return _c } @@ -1030,60 +1053,61 @@ func (_c *DB_PatchSchema_Call) Return(_a0 error) *DB_PatchSchema_Call { return _c } -func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string) error) *DB_PatchSchema_Call { +func (_c *DB_PatchSchema_Call) RunAndReturn(run func(context.Context, string, bool) error) *DB_PatchSchema_Call { _c.Call.Return(run) return _c } -// PrintDump provides a mock function with given fields: ctx -func (_m *DB) PrintDump(ctx context.Context) error { - ret := _m.Called(ctx) +// Peerstore provides a mock function with given fields: +func (_m *DB) Peerstore() datastore.DSBatching { + ret := _m.Called() - var r0 error - if rf, ok := ret.Get(0).(func(context.Context) error); ok { - r0 = rf(ctx) + var r0 datastore.DSBatching + if rf, ok := ret.Get(0).(func() datastore.DSBatching); ok { + r0 = rf() } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSBatching) + } } return r0 } -// DB_PrintDump_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrintDump' -type DB_PrintDump_Call struct { +// DB_Peerstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Peerstore' +type DB_Peerstore_Call struct { *mock.Call } -// PrintDump is a helper method to define mock.On call -// - ctx context.Context -func (_e *DB_Expecter) PrintDump(ctx interface{}) *DB_PrintDump_Call { - return &DB_PrintDump_Call{Call: _e.mock.On("PrintDump", ctx)} +// Peerstore is a helper method to define mock.On call +func (_e *DB_Expecter) Peerstore() *DB_Peerstore_Call { + return &DB_Peerstore_Call{Call: _e.mock.On("Peerstore")} } -func (_c *DB_PrintDump_Call) Run(run func(ctx context.Context)) *DB_PrintDump_Call { +func (_c *DB_Peerstore_Call) Run(run func()) *DB_Peerstore_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) + run() }) return _c } -func (_c *DB_PrintDump_Call) Return(_a0 error) *DB_PrintDump_Call { +func (_c *DB_Peerstore_Call) Return(_a0 datastore.DSBatching) *DB_Peerstore_Call { 
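// The expecter types above give compile-time-checked stubbing on top of
// testify. A sketch of a test driving the new three-argument PatchSchema
// follows (not part of this diff; it assumes mockery's generated NewDB
// constructor and that these mocks live under client/mocks):

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/sourcenetwork/defradb/client/mocks"
)

func TestPatchSchemaForwardsSetDefaultFlag(t *testing.T) {
	db := mocks.NewDB(t)

	// Expect the new (ctx, patch, setAsDefaultVersion) form exactly once.
	db.EXPECT().
		PatchSchema(mock.Anything, mock.Anything, true).
		Return(nil).
		Once()

	err := db.PatchSchema(context.Background(), `[]`, true)
	require.NoError(t, err)
}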
_c.Call.Return(_a0) return _c } -func (_c *DB_PrintDump_Call) RunAndReturn(run func(context.Context) error) *DB_PrintDump_Call { +func (_c *DB_Peerstore_Call) RunAndReturn(run func() datastore.DSBatching) *DB_Peerstore_Call { _c.Call.Return(run) return _c } -// RemoveP2PCollection provides a mock function with given fields: ctx, collectionID -func (_m *DB) RemoveP2PCollection(ctx context.Context, collectionID string) error { - ret := _m.Called(ctx, collectionID) +// PrintDump provides a mock function with given fields: ctx +func (_m *DB) PrintDump(ctx context.Context) error { + ret := _m.Called(ctx) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { - r0 = rf(ctx, collectionID) + if rf, ok := ret.Get(0).(func(context.Context) error); ok { + r0 = rf(ctx) } else { r0 = ret.Error(0) } @@ -1091,31 +1115,30 @@ func (_m *DB) RemoveP2PCollection(ctx context.Context, collectionID string) erro return r0 } -// DB_RemoveP2PCollection_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RemoveP2PCollection' -type DB_RemoveP2PCollection_Call struct { +// DB_PrintDump_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrintDump' +type DB_PrintDump_Call struct { *mock.Call } -// RemoveP2PCollection is a helper method to define mock.On call +// PrintDump is a helper method to define mock.On call // - ctx context.Context -// - collectionID string -func (_e *DB_Expecter) RemoveP2PCollection(ctx interface{}, collectionID interface{}) *DB_RemoveP2PCollection_Call { - return &DB_RemoveP2PCollection_Call{Call: _e.mock.On("RemoveP2PCollection", ctx, collectionID)} +func (_e *DB_Expecter) PrintDump(ctx interface{}) *DB_PrintDump_Call { + return &DB_PrintDump_Call{Call: _e.mock.On("PrintDump", ctx)} } -func (_c *DB_RemoveP2PCollection_Call) Run(run func(ctx context.Context, collectionID string)) *DB_RemoveP2PCollection_Call { +func (_c *DB_PrintDump_Call) Run(run func(ctx context.Context)) *DB_PrintDump_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string)) + run(args[0].(context.Context)) }) return _c } -func (_c *DB_RemoveP2PCollection_Call) Return(_a0 error) *DB_RemoveP2PCollection_Call { +func (_c *DB_PrintDump_Call) Return(_a0 error) *DB_PrintDump_Call { _c.Call.Return(_a0) return _c } -func (_c *DB_RemoveP2PCollection_Call) RunAndReturn(run func(context.Context, string) error) *DB_RemoveP2PCollection_Call { +func (_c *DB_PrintDump_Call) RunAndReturn(run func(context.Context) error) *DB_PrintDump_Call { _c.Call.Return(run) return _c } @@ -1163,12 +1186,12 @@ func (_c *DB_Root_Call) RunAndReturn(run func() datastore.RootStore) *DB_Root_Ca return _c } -// SetMigration provides a mock function with given fields: _a0, _a1 -func (_m *DB) SetMigration(_a0 context.Context, _a1 client.LensConfig) error { +// SetDefaultSchemaVersion provides a mock function with given fields: _a0, _a1 +func (_m *DB) SetDefaultSchemaVersion(_a0 context.Context, _a1 string) error { ret := _m.Called(_a0, _a1) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, client.LensConfig) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) @@ -1177,42 +1200,42 @@ func (_m *DB) SetMigration(_a0 context.Context, _a1 client.LensConfig) error { return r0 } -// DB_SetMigration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetMigration' -type DB_SetMigration_Call struct { 
+// DB_SetDefaultSchemaVersion_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetDefaultSchemaVersion' +type DB_SetDefaultSchemaVersion_Call struct { *mock.Call } -// SetMigration is a helper method to define mock.On call +// SetDefaultSchemaVersion is a helper method to define mock.On call // - _a0 context.Context -// - _a1 client.LensConfig -func (_e *DB_Expecter) SetMigration(_a0 interface{}, _a1 interface{}) *DB_SetMigration_Call { - return &DB_SetMigration_Call{Call: _e.mock.On("SetMigration", _a0, _a1)} +// - _a1 string +func (_e *DB_Expecter) SetDefaultSchemaVersion(_a0 interface{}, _a1 interface{}) *DB_SetDefaultSchemaVersion_Call { + return &DB_SetDefaultSchemaVersion_Call{Call: _e.mock.On("SetDefaultSchemaVersion", _a0, _a1)} } -func (_c *DB_SetMigration_Call) Run(run func(_a0 context.Context, _a1 client.LensConfig)) *DB_SetMigration_Call { +func (_c *DB_SetDefaultSchemaVersion_Call) Run(run func(_a0 context.Context, _a1 string)) *DB_SetDefaultSchemaVersion_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(client.LensConfig)) + run(args[0].(context.Context), args[1].(string)) }) return _c } -func (_c *DB_SetMigration_Call) Return(_a0 error) *DB_SetMigration_Call { +func (_c *DB_SetDefaultSchemaVersion_Call) Return(_a0 error) *DB_SetDefaultSchemaVersion_Call { _c.Call.Return(_a0) return _c } -func (_c *DB_SetMigration_Call) RunAndReturn(run func(context.Context, client.LensConfig) error) *DB_SetMigration_Call { +func (_c *DB_SetDefaultSchemaVersion_Call) RunAndReturn(run func(context.Context, string) error) *DB_SetDefaultSchemaVersion_Call { _c.Call.Return(run) return _c } -// SetReplicator provides a mock function with given fields: ctx, rep -func (_m *DB) SetReplicator(ctx context.Context, rep client.Replicator) error { - ret := _m.Called(ctx, rep) +// SetMigration provides a mock function with given fields: _a0, _a1 +func (_m *DB) SetMigration(_a0 context.Context, _a1 client.LensConfig) error { + ret := _m.Called(_a0, _a1) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, client.Replicator) error); ok { - r0 = rf(ctx, rep) + if rf, ok := ret.Get(0).(func(context.Context, client.LensConfig) error); ok { + r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) } @@ -1220,31 +1243,31 @@ func (_m *DB) SetReplicator(ctx context.Context, rep client.Replicator) error { return r0 } -// DB_SetReplicator_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetReplicator' -type DB_SetReplicator_Call struct { +// DB_SetMigration_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SetMigration' +type DB_SetMigration_Call struct { *mock.Call } -// SetReplicator is a helper method to define mock.On call -// - ctx context.Context -// - rep client.Replicator -func (_e *DB_Expecter) SetReplicator(ctx interface{}, rep interface{}) *DB_SetReplicator_Call { - return &DB_SetReplicator_Call{Call: _e.mock.On("SetReplicator", ctx, rep)} +// SetMigration is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 client.LensConfig +func (_e *DB_Expecter) SetMigration(_a0 interface{}, _a1 interface{}) *DB_SetMigration_Call { + return &DB_SetMigration_Call{Call: _e.mock.On("SetMigration", _a0, _a1)} } -func (_c *DB_SetReplicator_Call) Run(run func(ctx context.Context, rep client.Replicator)) *DB_SetReplicator_Call { +func (_c *DB_SetMigration_Call) Run(run func(_a0 context.Context, _a1 client.LensConfig)) 
*DB_SetMigration_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(client.Replicator)) + run(args[0].(context.Context), args[1].(client.LensConfig)) }) return _c } -func (_c *DB_SetReplicator_Call) Return(_a0 error) *DB_SetReplicator_Call { +func (_c *DB_SetMigration_Call) Return(_a0 error) *DB_SetMigration_Call { _c.Call.Return(_a0) return _c } -func (_c *DB_SetReplicator_Call) RunAndReturn(run func(context.Context, client.Replicator) error) *DB_SetReplicator_Call { +func (_c *DB_SetMigration_Call) RunAndReturn(run func(context.Context, client.LensConfig) error) *DB_SetMigration_Call { _c.Call.Return(run) return _c } diff --git a/client/p2p.go b/client/p2p.go index 5f864fcb9d..12be6ebf8d 100644 --- a/client/p2p.go +++ b/client/p2p.go @@ -12,9 +12,17 @@ package client import ( "context" + + "github.com/libp2p/go-libp2p/core/peer" ) +// P2P is a peer connected database implementation. type P2P interface { + DB + + // PeerInfo returns the p2p host id and listening addresses. + PeerInfo() peer.AddrInfo + // SetReplicator adds a replicator to the persisted list or adds // schemas if the replicator already exists. SetReplicator(ctx context.Context, rep Replicator) error @@ -25,15 +33,15 @@ type P2P interface { // subscribed schemas. GetAllReplicators(ctx context.Context) ([]Replicator, error) - // AddP2PCollection adds the given collection ID that the P2P system - // subscribes to to the the persisted list. It will error if the provided - // collection ID is invalid. - AddP2PCollection(ctx context.Context, collectionID string) error + // AddP2PCollections adds the given collection IDs to the P2P system and + // subscribes to their topics. It will error if any of the provided + // collection IDs are invalid. + AddP2PCollections(ctx context.Context, collectionIDs []string) error - // RemoveP2PCollection removes the given collection ID that the P2P system - // subscribes to from the the persisted list. It will error if the provided - // collection ID is invalid. - RemoveP2PCollection(ctx context.Context, collectionID string) error + // RemoveP2PCollections removes the given collection IDs from the P2P system and + // unsubscribes from their topics. It will error if the provided + // collection IDs are invalid. + RemoveP2PCollections(ctx context.Context, collectionIDs []string) error // GetAllP2PCollections returns the list of persisted collection IDs that // the P2P system subscribes to. diff --git a/cmd/defradb/main.go b/cmd/defradb/main.go index 761666bea7..2406885a76 100644 --- a/cmd/defradb/main.go +++ b/cmd/defradb/main.go @@ -12,7 +12,6 @@ package main import ( - "context" "os" "github.com/sourcenetwork/defradb/cli" @@ -21,10 +20,13 @@ import ( // Execute adds all child commands to the root command and sets flags appropriately. 
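// A sketch (not part of this diff) of a caller driving the reworked
// client.P2P interface shown above: collection subscriptions are now batched,
// and PeerInfo exposes the host identity. p2pDB stands for any client.P2P
// implementation.

import (
	"context"
	"fmt"

	"github.com/sourcenetwork/defradb/client"
)

func subscribe(ctx context.Context, p2pDB client.P2P, collectionIDs []string) error {
	// A single call now subscribes to every topic; it errors if any ID is invalid.
	if err := p2pDB.AddP2PCollections(ctx, collectionIDs); err != nil {
		return err
	}
	info := p2pDB.PeerInfo() // host ID plus listening addresses
	fmt.Println("subscribed as", info.ID, "listening on", info.Addrs)
	return nil
}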
func main() { - cfg := config.DefaultConfig() - ctx := context.Background() - defraCmd := cli.NewDefraCommand(cfg) - if err := defraCmd.Execute(ctx); err != nil { + defraCmd := cli.NewDefraCommand(config.DefaultConfig()) + if err := defraCmd.Execute(); err != nil { + // this error is okay to discard because cobra + // logs any errors encountered during execution + // + // exiting with a non-zero status code signals + // that an error has occurred during execution os.Exit(1) } } diff --git a/cmd/genclidocs/genclidocs.go b/cmd/genclidocs/main.go similarity index 59% rename from cmd/genclidocs/genclidocs.go rename to cmd/genclidocs/main.go index bccc96b38c..f556c26d20 100644 --- a/cmd/genclidocs/genclidocs.go +++ b/cmd/genclidocs/main.go @@ -14,30 +14,33 @@ genclidocs is a tool to generate the command line interface documentation. package main import ( - "context" "flag" + "log" "os" "github.com/spf13/cobra/doc" "github.com/sourcenetwork/defradb/cli" "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" ) -var log = logging.MustNewLogger("genclidocs") +var path string + +func init() { + flag.StringVar(&path, "o", "docs/cmd", "path to write the cmd docs to") +} func main() { - path := flag.String("o", "docs/cmd", "path to write the cmd docs to") flag.Parse() - err := os.MkdirAll(*path, os.ModePerm) - if err != nil { - log.FatalE(context.Background(), "Creating the filesystem path failed", err) - } + defraCmd := cli.NewDefraCommand(config.DefaultConfig()) - defraCmd.RootCmd.DisableAutoGenTag = true - err = doc.GenMarkdownTree(defraCmd.RootCmd, *path) - if err != nil { - log.FatalE(context.Background(), "Generating cmd docs failed", err) + defraCmd.DisableAutoGenTag = true + + if err := os.MkdirAll(path, os.ModePerm); err != nil { + log.Fatal("Creating the filesystem path failed", err) + } + + if err := doc.GenMarkdownTree(defraCmd, path); err != nil { + log.Fatal("Generating cmd docs failed", err) } } diff --git a/cmd/genmanpages/main.go b/cmd/genmanpages/main.go index 7ec7a3ce59..1a9b43df7c 100644 --- a/cmd/genmanpages/main.go +++ b/cmd/genmanpages/main.go @@ -15,40 +15,39 @@ installation is packaging and system dependent.
package main import ( - "context" "flag" + "log" "os" "github.com/spf13/cobra/doc" "github.com/sourcenetwork/defradb/cli" "github.com/sourcenetwork/defradb/config" - "github.com/sourcenetwork/defradb/logging" ) const defaultPerm os.FileMode = 0o777 -var log = logging.MustNewLogger("genmanpages") +var dir string + +var header = &doc.GenManHeader{ + Title: "defradb - Peer-to-Peer Edge Database", + Section: "1", +} + +func init() { + flag.StringVar(&dir, "o", "build/man", "Directory in which to generate DefraDB man pages") +} func main() { - dirFlag := flag.String("o", "build/man", "Directory in which to generate DefraDB man pages") flag.Parse() - genRootManPages(*dirFlag) -} -func genRootManPages(dir string) { - ctx := context.Background() - header := &doc.GenManHeader{ - Title: "defradb - Peer-to-Peer Edge Database", - Section: "1", - } - err := os.MkdirAll(dir, defaultPerm) - if err != nil { - log.FatalE(ctx, "Failed to create directory", err, logging.NewKV("dir", dir)) - } defraCmd := cli.NewDefraCommand(config.DefaultConfig()) - err = doc.GenManTree(defraCmd.RootCmd, header, dir) - if err != nil { - log.FatalE(ctx, "Failed generation of man pages", err) + + if err := os.MkdirAll(dir, defaultPerm); err != nil { + log.Fatal("Failed to create directory", err) + } + + if err := doc.GenManTree(defraCmd, header, dir); err != nil { + log.Fatal("Failed generation of man pages", err) } } diff --git a/config/config.go b/config/config.go index 9832a92818..24d5ee73e7 100644 --- a/config/config.go +++ b/config/config.go @@ -51,7 +51,6 @@ import ( "strconv" "strings" "text/template" - "time" "github.com/mitchellh/mapstructure" ma "github.com/multiformats/go-multiaddr" @@ -350,79 +349,42 @@ func (apicfg *APIConfig) AddressToURL() string { // NetConfig configures aspects of network and peer-to-peer. 
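// Both doc generators above now share the same shape: register the output
// flag in init, build the root command directly, and fail fast with the
// standard library logger. A condensed sketch (not part of this diff):

import (
	"os"

	"github.com/spf13/cobra/doc"

	"github.com/sourcenetwork/defradb/cli"
	"github.com/sourcenetwork/defradb/config"
)

func generateDocs(outDir string) error {
	defraCmd := cli.NewDefraCommand(config.DefaultConfig())
	defraCmd.DisableAutoGenTag = true // keeps generated files stable between runs

	if err := os.MkdirAll(outDir, os.ModePerm); err != nil {
		return err
	}
	return doc.GenMarkdownTree(defraCmd, outDir)
}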
type NetConfig struct { - P2PAddress string - P2PDisabled bool - Peers string - PubSubEnabled bool `mapstructure:"pubsub"` - RelayEnabled bool `mapstructure:"relay"` - RPCAddress string - RPCMaxConnectionIdle string - RPCTimeout string - TCPAddress string + P2PAddress string + P2PDisabled bool + Peers string + PubSubEnabled bool `mapstructure:"pubsub"` + RelayEnabled bool `mapstructure:"relay"` } func defaultNetConfig() *NetConfig { return &NetConfig{ - P2PAddress: "/ip4/0.0.0.0/tcp/9171", - P2PDisabled: false, - Peers: "", - PubSubEnabled: true, - RelayEnabled: false, - RPCAddress: "0.0.0.0:9161", - RPCMaxConnectionIdle: "5m", - RPCTimeout: "10s", - TCPAddress: "/ip4/0.0.0.0/tcp/9161", + P2PAddress: "/ip4/0.0.0.0/tcp/9171", + P2PDisabled: false, + Peers: "", + PubSubEnabled: true, + RelayEnabled: false, } } func (netcfg *NetConfig) validate() error { - _, err := time.ParseDuration(netcfg.RPCTimeout) - if err != nil { - return NewErrInvalidRPCTimeout(err, netcfg.RPCTimeout) - } - _, err = time.ParseDuration(netcfg.RPCMaxConnectionIdle) - if err != nil { - return NewErrInvalidRPCMaxConnectionIdle(err, netcfg.RPCMaxConnectionIdle) - } - _, err = ma.NewMultiaddr(netcfg.P2PAddress) + _, err := ma.NewMultiaddr(netcfg.P2PAddress) if err != nil { return NewErrInvalidP2PAddress(err, netcfg.P2PAddress) } - _, err = net.ResolveTCPAddr("tcp", netcfg.RPCAddress) - if err != nil { - return NewErrInvalidRPCAddress(err, netcfg.RPCAddress) - } if len(netcfg.Peers) > 0 { peers := strings.Split(netcfg.Peers, ",") maddrs := make([]ma.Multiaddr, len(peers)) for i, addr := range peers { - maddrs[i], err = ma.NewMultiaddr(addr) + addr, err := ma.NewMultiaddr(addr) if err != nil { return NewErrInvalidBootstrapPeers(err, netcfg.Peers) } + maddrs[i] = addr } } return nil } -// RPCTimeoutDuration gives the RPC timeout as a time.Duration. -func (netcfg *NetConfig) RPCTimeoutDuration() (time.Duration, error) { - d, err := time.ParseDuration(netcfg.RPCTimeout) - if err != nil { - return d, NewErrInvalidRPCTimeout(err, netcfg.RPCTimeout) - } - return d, nil -} - -// RPCMaxConnectionIdleDuration gives the RPC MaxConnectionIdle as a time.Duration. -func (netcfg *NetConfig) RPCMaxConnectionIdleDuration() (time.Duration, error) { - d, err := time.ParseDuration(netcfg.RPCMaxConnectionIdle) - if err != nil { - return d, NewErrInvalidRPCMaxConnectionIdle(err, netcfg.RPCMaxConnectionIdle) - } - return d, nil -} - // LogConfig configures output and logger. 
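// With the RPC settings removed, NetConfig validation reduces to multiaddr
// checks. A standalone sketch of the same logic (not part of this diff):

import (
	"strings"

	ma "github.com/multiformats/go-multiaddr"
)

func validateNet(p2pAddress, peers string) error {
	if _, err := ma.NewMultiaddr(p2pAddress); err != nil {
		return err
	}
	if len(peers) == 0 {
		return nil
	}
	for _, addr := range strings.Split(peers, ",") {
		// Each bootstrap peer must itself be a well-formed multiaddr.
		if _, err := ma.NewMultiaddr(addr); err != nil {
			return err
		}
	}
	return nil
}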
type LoggingConfig struct { Level string diff --git a/config/config_test.go b/config/config_test.go index b7ff295efa..e29ef8aa81 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -15,7 +15,6 @@ import ( "os" "path/filepath" "testing" - "time" "github.com/stretchr/testify/assert" ) @@ -26,8 +25,6 @@ var envVarsDifferent = map[string]string{ "DEFRA_API_ADDRESS": "localhost:9999", "DEFRA_NET_P2PDISABLED": "true", "DEFRA_NET_P2PADDRESS": "/ip4/0.0.0.0/tcp/9876", - "DEFRA_NET_RPCADDRESS": "localhost:7777", - "DEFRA_NET_RPCTIMEOUT": "90s", "DEFRA_NET_PUBSUB": "false", "DEFRA_NET_RELAY": "false", "DEFRA_LOG_LEVEL": "error", @@ -41,8 +38,6 @@ var envVarsInvalid = map[string]string{ "DEFRA_API_ADDRESS": "^=+()&**()*(&))", "DEFRA_NET_P2PDISABLED": "^=+()&**()*(&))", "DEFRA_NET_P2PADDRESS": "^=+()&**()*(&))", - "DEFRA_NET_RPCADDRESS": "^=+()&**()*(&))", - "DEFRA_NET_RPCTIMEOUT": "^=+()&**()*(&))", "DEFRA_NET_PUBSUB": "^=+()&**()*(&))", "DEFRA_NET_RELAY": "^=+()&**()*(&))", "DEFRA_LOG_LEVEL": "^=+()&**()*(&))", @@ -178,8 +173,6 @@ func TestEnvVariablesAllConsidered(t *testing.T) { assert.Equal(t, "memory", cfg.Datastore.Store) assert.Equal(t, true, cfg.Net.P2PDisabled) assert.Equal(t, "/ip4/0.0.0.0/tcp/9876", cfg.Net.P2PAddress) - assert.Equal(t, "localhost:7777", cfg.Net.RPCAddress) - assert.Equal(t, "90s", cfg.Net.RPCTimeout) assert.Equal(t, false, cfg.Net.PubSubEnabled) assert.Equal(t, false, cfg.Net.RelayEnabled) assert.Equal(t, "error", cfg.Log.Level) @@ -390,51 +383,6 @@ func TestValidationInvalidNetConfigPeers(t *testing.T) { assert.ErrorIs(t, err, ErrFailedToValidateConfig) } -func TestValidationInvalidRPCMaxConnectionIdle(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCMaxConnectionIdle = "123123" - err := cfg.validate() - assert.ErrorIs(t, err, ErrFailedToValidateConfig) -} - -func TestValidationInvalidRPCTimeout(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCTimeout = "123123" - err := cfg.validate() - assert.ErrorIs(t, err, ErrFailedToValidateConfig) -} - -func TestValidationRPCTimeoutDuration(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCTimeout = "1s" - err := cfg.validate() - assert.NoError(t, err) -} - -func TestValidationInvalidRPCTimeoutDuration(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCTimeout = "123123" - err := cfg.validate() - assert.ErrorIs(t, err, ErrInvalidRPCTimeout) -} - -func TestValidationRPCMaxConnectionIdleDuration(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCMaxConnectionIdle = "1s" - err := cfg.validate() - assert.NoError(t, err) - duration, err := cfg.Net.RPCMaxConnectionIdleDuration() - assert.NoError(t, err) - assert.Equal(t, duration, 1*time.Second) -} - -func TestValidationInvalidMaxConnectionIdleDuration(t *testing.T) { - cfg := DefaultConfig() - cfg.Net.RPCMaxConnectionIdle = "*ˆ&%*&%" - err := cfg.validate() - assert.ErrorIs(t, err, ErrInvalidRPCMaxConnectionIdle) -} - func TestValidationInvalidLoggingConfig(t *testing.T) { cfg := DefaultConfig() cfg.Log.Level = "546578" diff --git a/config/configfile_yaml.gotmpl b/config/configfile_yaml.gotmpl index 8e011658e9..5346e41378 100644 --- a/config/configfile_yaml.gotmpl +++ b/config/configfile_yaml.gotmpl @@ -37,20 +37,12 @@ net: p2pdisabled: {{ .Net.P2PDisabled }} # Listening address of the P2P network p2paddress: {{ .Net.P2PAddress }} - # Listening address of the RPC endpoint - rpcaddress: {{ .Net.RPCAddress }} - # gRPC server address - tcpaddress: {{ .Net.TCPAddress }} - # Time duration after which a RPC connection to a peer times out - rpctimeout: {{ 
.Net.RPCTimeout }} # Whether the node has pubsub enabled or not pubsub: {{ .Net.PubSubEnabled }} # Enable libp2p's Circuit relay transport protocol https://docs.libp2p.io/concepts/circuit-relay/ relay: {{ .Net.RelayEnabled }} # List of peers to bootstrap with, specified as multiaddresses (https://docs.libp2p.io/concepts/addressing/) peers: {{ .Net.Peers }} - # Amount of time after which an idle RPC connection would be closed - RPCMaxConnectionIdle: {{ .Net.RPCMaxConnectionIdle }} log: # Log level. Options are debug, info, error, fatal diff --git a/core/clock.go b/core/clock.go index 622a36233c..e7b8c7f1f2 100644 --- a/core/clock.go +++ b/core/clock.go @@ -13,7 +13,6 @@ package core import ( "context" - cid "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" ) @@ -24,5 +23,5 @@ type MerkleClock interface { ctx context.Context, delta Delta, ) (ipld.Node, error) // possibly change to AddDeltaNode? - ProcessNode(context.Context, NodeGetter, Delta, ipld.Node) ([]cid.Cid, error) + ProcessNode(context.Context, Delta, ipld.Node) error } diff --git a/core/crdt/composite.go b/core/crdt/composite.go index 5504a19437..68f7824329 100644 --- a/core/crdt/composite.go +++ b/core/crdt/composite.go @@ -119,12 +119,12 @@ func NewCompositeDAG( } } -// GetSchemaID returns the schema ID of the composite DAG CRDT. +// ID returns the string representation of the composite DAG CRDT's key. func (c CompositeDAG) ID() string { return c.key.ToString() } -// GetSchemaID returns the schema ID of the composite DAG CRDT. +// Value is a no-op for the composite DAG CRDT; it always returns nil. func (c CompositeDAG) Value(ctx context.Context) ([]byte, error) { return nil, nil } @@ -196,20 +196,6 @@ func (c CompositeDAG) Merge(ctx context.Context, delta core.Delta) error { } func (c CompositeDAG) deleteWithPrefix(ctx context.Context, key core.DataStoreKey) error { - val, err := c.store.Get(ctx, key.ToDS()) - if err != nil && !errors.Is(err, ds.ErrNotFound) { - return err - } - if !errors.Is(err, ds.ErrNotFound) { - err = c.store.Put(ctx, c.key.WithDeletedFlag().ToDS(), val) - if err != nil { - return err - } - err = c.store.Delete(ctx, key.ToDS()) - if err != nil { - return err - } - } q := query.Query{ Prefix: key.ToString(), } diff --git a/core/delta.go b/core/delta.go index 185a0352e2..cddf51b71c 100644 --- a/core/delta.go +++ b/core/delta.go @@ -29,12 +29,6 @@ type CompositeDelta interface { Links() []DAGLink } -type NetDelta interface { - Delta - GetSchemaID() string - // DocKey() client.DocKey -} - // DAGLink represents a link to another object in a DAG.
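// With the exact-key special case removed above, deleteWithPrefix reduces to
// a prefix query plus per-result deletes. A generic sketch of that pattern
// against go-datastore (not part of this diff):

import (
	"context"

	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/query"
)

func deletePrefix(ctx context.Context, store ds.Datastore, prefix string) error {
	results, err := store.Query(ctx, query.Query{Prefix: prefix})
	if err != nil {
		return err
	}
	defer func() { _ = results.Close() }()

	for res := range results.Next() {
		if res.Error != nil {
			return res.Error
		}
		// Delete each matching entry individually.
		if err := store.Delete(ctx, ds.NewKey(res.Key)); err != nil {
			return err
		}
	}
	return nil
}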
type DAGLink struct { Name string diff --git a/core/encoding.go b/core/encoding.go index 9482acefbf..ba4926ffc5 100644 --- a/core/encoding.go +++ b/core/encoding.go @@ -109,9 +109,14 @@ func DecodeFieldValue(fieldDesc client.FieldDescription, val any) (any, error) { case client.FieldKind_INT: switch v := val.(type) { case float64: - if v >= 0 { - return uint64(v), nil - } + return int64(v), nil + case int64: + return int64(v), nil + case int: + return int64(v), nil + case uint64: + return int64(v), nil + case uint: return int64(v), nil } } diff --git a/core/key.go b/core/key.go index a8ec5ece2b..0a2529338a 100644 --- a/core/key.go +++ b/core/key.go @@ -11,6 +11,7 @@ package core import ( + "fmt" "strconv" "strings" @@ -41,17 +42,18 @@ const ( ) const ( - COLLECTION = "/collection/names" - COLLECTION_SCHEMA = "/collection/schema" - COLLECTION_SCHEMA_VERSION = "/collection/version/v" - COLLECTION_SCHEMA_VERSION_HISTORY = "/collection/version/h" - COLLECTION_INDEX = "/collection/index" - SCHEMA_MIGRATION = "/schema/migration" - SEQ = "/seq" - PRIMARY_KEY = "/pk" - DATASTORE_DOC_VERSION_FIELD_ID = "v" - REPLICATOR = "/replicator/id" - P2P_COLLECTION = "/p2p/collection" + COLLECTION = "/collection/id" + COLLECTION_NAME = "/collection/name" + COLLECTION_SCHEMA_VERSION = "/collection/version" + COLLECTION_INDEX = "/collection/index" + SCHEMA_MIGRATION = "/schema/migration" + SCHEMA_VERSION = "/schema/version/v" + SCHEMA_VERSION_HISTORY = "/schema/version/h" + SEQ = "/seq" + PRIMARY_KEY = "/pk" + DATASTORE_DOC_VERSION_FIELD_ID = "v" + REPLICATOR = "/replicator/id" + P2P_COLLECTION = "/p2p/collection" ) // Key is an interface that represents a key in the database. @@ -98,26 +100,32 @@ type HeadStoreKey struct { var _ Key = (*HeadStoreKey)(nil) -// CollectionKey points to the current/'head' SchemaVersionId for -// the collection of the given name. +// CollectionKey points to the json serialized description of +// the collection of the given ID. type CollectionKey struct { - CollectionName string + CollectionID uint32 } var _ Key = (*CollectionKey)(nil) -// CollectionSchemaKey points to the current/'head' SchemaVersionId for -// the collection of the given schema id. -type CollectionSchemaKey struct { - SchemaId string +// CollectionNameKey points to the ID of the collection of the given +// name. +type CollectionNameKey struct { + Name string } -var _ Key = (*CollectionSchemaKey)(nil) +var _ Key = (*CollectionNameKey)(nil) -// CollectionSchemaVersionKey points to schema of a collection at a given -// version. +// CollectionSchemaVersionKey points to nil, but the keys/prefix can be used +// to get collections that are using, or have used, a given schema version. +// +// If a collection is updated to a different schema version, the old entry(s) +// of this key will be preserved. +// +// This key should be removed in https://github.com/sourcenetwork/defradb/issues/1085 type CollectionSchemaVersionKey struct { SchemaVersionId string + CollectionID uint32 } var _ Key = (*CollectionSchemaVersionKey)(nil) @@ -132,6 +140,15 @@ type CollectionIndexKey struct { var _ Key = (*CollectionIndexKey)(nil) +// SchemaVersionKey points to the json serialized schema at the specified version. +// +// Its corresponding value is immutable. +type SchemaVersionKey struct { + SchemaVersionID string +} + +var _ Key = (*SchemaVersionKey)(nil) + // SchemaHistoryKey holds the pathway through the schema version history for // any given schema.
// @@ -139,7 +156,7 @@ var _ Key = (*CollectionIndexKey)(nil) // If a SchemaHistoryKey does not exist for a given SchemaVersionID it means // that that SchemaVersionID is for the latest version. type SchemaHistoryKey struct { - SchemaID string + SchemaRoot string PreviousSchemaVersionID string } @@ -245,16 +262,32 @@ func NewHeadStoreKey(key string) (HeadStoreKey, error) { // Returns a formatted collection key for the system data store. // It assumes the name of the collection is non-empty. -func NewCollectionKey(name string) CollectionKey { - return CollectionKey{CollectionName: name} +func NewCollectionKey(id uint32) CollectionKey { + return CollectionKey{CollectionID: id} } -func NewCollectionSchemaKey(schemaId string) CollectionSchemaKey { - return CollectionSchemaKey{SchemaId: schemaId} +func NewCollectionNameKey(name string) CollectionNameKey { + return CollectionNameKey{Name: name} } -func NewCollectionSchemaVersionKey(schemaVersionId string) CollectionSchemaVersionKey { - return CollectionSchemaVersionKey{SchemaVersionId: schemaVersionId} +func NewCollectionSchemaVersionKey(schemaVersionId string, collectionID uint32) CollectionSchemaVersionKey { + return CollectionSchemaVersionKey{ + SchemaVersionId: schemaVersionId, + CollectionID: collectionID, + } +} + +func NewCollectionSchemaVersionKeyFromString(key string) (CollectionSchemaVersionKey, error) { + elements := strings.Split(key, "/") + colID, err := strconv.Atoi(elements[len(elements)-1]) + if err != nil { + return CollectionSchemaVersionKey{}, err + } + + return CollectionSchemaVersionKey{ + SchemaVersionId: elements[len(elements)-2], + CollectionID: uint32(colID), + }, nil } // NewCollectionIndexKey creates a new CollectionIndexKey from a collection name and index name. @@ -307,9 +340,13 @@ func (k CollectionIndexKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } -func NewSchemaHistoryKey(schemaId string, previousSchemaVersionID string) SchemaHistoryKey { +func NewSchemaVersionKey(schemaVersionID string) SchemaVersionKey { + return SchemaVersionKey{SchemaVersionID: schemaVersionID} +} + +func NewSchemaHistoryKey(schemaRoot string, previousSchemaVersionID string) SchemaHistoryKey { return SchemaHistoryKey{ - SchemaID: schemaId, + SchemaRoot: schemaRoot, PreviousSchemaVersionID: previousSchemaVersionID, } } @@ -319,14 +356,14 @@ func NewSchemaVersionMigrationKey(schemaVersionID string) SchemaVersionMigration } func NewSchemaHistoryKeyFromString(keyString string) (SchemaHistoryKey, error) { - keyString = strings.TrimPrefix(keyString, COLLECTION_SCHEMA_VERSION_HISTORY+"/") + keyString = strings.TrimPrefix(keyString, SCHEMA_VERSION_HISTORY+"/") elements := strings.Split(keyString, "/") if len(elements) != 2 { return SchemaHistoryKey{}, ErrInvalidKey } return SchemaHistoryKey{ - SchemaID: elements[0], + SchemaRoot: elements[0], PreviousSchemaVersionID: elements[1], }, nil } @@ -572,13 +609,7 @@ func (k PrimaryDataStoreKey) ToString() string { } func (k CollectionKey) ToString() string { - result := COLLECTION - - if k.CollectionName != "" { - result = result + "/" + k.CollectionName - } - - return result + return fmt.Sprintf("%s/%s", COLLECTION, strconv.Itoa(int(k.CollectionID))) } func (k CollectionKey) Bytes() []byte { @@ -589,21 +620,15 @@ func (k CollectionKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } -func (k CollectionSchemaKey) ToString() string { - result := COLLECTION_SCHEMA - - if k.SchemaId != "" { - result = result + "/" + k.SchemaId - } - - return result +func (k CollectionNameKey) ToString() string { + 
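// For reference, the constructors above produce the following key strings
// (the "bafyVID" version IDs are illustrative placeholders, not real CIDs):
//
//	NewCollectionKey(1).ToString()                         == "/collection/id/1"
//	NewCollectionNameKey("Users").ToString()               == "/collection/name/Users"
//	NewCollectionSchemaVersionKey("bafyVID", 1).ToString() == "/collection/version/bafyVID/1"
//	NewSchemaVersionKey("bafyVID").ToString()              == "/schema/version/v/bafyVID"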
return fmt.Sprintf("%s/%s", COLLECTION_NAME, k.Name) } -func (k CollectionSchemaKey) Bytes() []byte { +func (k CollectionNameKey) Bytes() []byte { return []byte(k.ToString()) } -func (k CollectionSchemaKey) ToDS() ds.Key { +func (k CollectionNameKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } @@ -614,6 +639,10 @@ func (k CollectionSchemaVersionKey) ToString() string { result = result + "/" + k.SchemaVersionId } + if k.CollectionID != 0 { + result = fmt.Sprintf("%s/%s", result, strconv.Itoa(int(k.CollectionID))) + } + return result } @@ -625,11 +654,29 @@ func (k CollectionSchemaVersionKey) ToDS() ds.Key { return ds.NewKey(k.ToString()) } +func (k SchemaVersionKey) ToString() string { + result := SCHEMA_VERSION + + if k.SchemaVersionID != "" { + result = result + "/" + k.SchemaVersionID + } + + return result +} + +func (k SchemaVersionKey) Bytes() []byte { + return []byte(k.ToString()) +} + +func (k SchemaVersionKey) ToDS() ds.Key { + return ds.NewKey(k.ToString()) +} + func (k SchemaHistoryKey) ToString() string { - result := COLLECTION_SCHEMA_VERSION_HISTORY + result := SCHEMA_VERSION_HISTORY - if k.SchemaID != "" { - result = result + "/" + k.SchemaID + if k.SchemaRoot != "" { + result = result + "/" + k.SchemaRoot } if k.PreviousSchemaVersionID != "" { diff --git a/core/log.go b/core/log.go deleted file mode 100644 index 27bb3f84a9..0000000000 --- a/core/log.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package core - -import ( - cid "github.com/ipfs/go-cid" - ipld "github.com/ipfs/go-ipld-format" -) - -// Log represents a new DAG node added to the -// append-only MerkleCRDT Clock graph of a -// document or sub-field. -// Note: This may need to be an interface :/ -type Log struct { - DocKey string - Cid cid.Cid - SchemaID string - Block ipld.Node - Priority uint64 -} diff --git a/core/parser.go b/core/parser.go index ee2d2cfbf1..05a90d0526 100644 --- a/core/parser.go +++ b/core/parser.go @@ -13,7 +13,7 @@ package core import ( "context" - "github.com/graphql-go/graphql/language/ast" + "github.com/sourcenetwork/graphql-go/language/ast" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" @@ -51,8 +51,10 @@ type Parser interface { NewFilterFromString(collectionType string, body string) (immutable.Option[request.Filter], error) // ParseSDL parses an SDL string into a set of collection descriptions. - ParseSDL(ctx context.Context, schemaString string) ([]client.CollectionDescription, error) + ParseSDL(ctx context.Context, schemaString string) ([]client.CollectionDefinition, error) // Adds the given schema to this parser's model. - SetSchema(ctx context.Context, txn datastore.Txn, collections []client.CollectionDescription) error + // + // All collections should be provided, not just new/updated ones. 
+ SetSchema(ctx context.Context, txn datastore.Txn, collections []client.CollectionDefinition) error } diff --git a/datastore/badger/v4/datastore.go b/datastore/badger/v4/datastore.go index 23ed75df53..14b841867f 100644 --- a/datastore/badger/v4/datastore.go +++ b/datastore/badger/v4/datastore.go @@ -11,11 +11,11 @@ import ( "sync" "time" - badger "github.com/dgraph-io/badger/v4" ds "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" logger "github.com/ipfs/go-log/v2" goprocess "github.com/jbenet/goprocess" + badger "github.com/sourcenetwork/badger/v4" "go.uber.org/zap" "github.com/sourcenetwork/defradb/datastore/iterable" diff --git a/datastore/badger/v4/datastore_test.go b/datastore/badger/v4/datastore_test.go index e978fde92e..1f4f0bb5d2 100644 --- a/datastore/badger/v4/datastore_test.go +++ b/datastore/badger/v4/datastore_test.go @@ -15,9 +15,9 @@ import ( "testing" "time" - "github.com/dgraph-io/badger/v4" ds "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" + "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/datastore/badger/v4/iterator.go b/datastore/badger/v4/iterator.go index 15d8dfbf6d..c013874230 100644 --- a/datastore/badger/v4/iterator.go +++ b/datastore/badger/v4/iterator.go @@ -17,10 +17,10 @@ import ( "context" "sync" - badger "github.com/dgraph-io/badger/v4" ds "github.com/ipfs/go-datastore" dsq "github.com/ipfs/go-datastore/query" goprocess "github.com/jbenet/goprocess" + badger "github.com/sourcenetwork/badger/v4" "github.com/sourcenetwork/defradb/datastore/iterable" ) diff --git a/datastore/concurrent_txn.go b/datastore/concurrent_txn.go index 2a8aed015e..f46637e99d 100644 --- a/datastore/concurrent_txn.go +++ b/datastore/concurrent_txn.go @@ -49,8 +49,7 @@ func NewConcurrentTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, id uin } rootConcurentTxn := &concurrentTxn{Txn: rootTxn} - root := AsDSReaderWriter(rootConcurentTxn) - multistore := MultiStoreFrom(root) + multistore := MultiStoreFrom(rootConcurentTxn) return &txn{ rootConcurentTxn, multistore, diff --git a/datastore/concurrent_txt_test.go b/datastore/concurrent_txt_test.go index f3e03b8c3e..1a1b43bbab 100644 --- a/datastore/concurrent_txt_test.go +++ b/datastore/concurrent_txt_test.go @@ -14,8 +14,8 @@ import ( "context" "testing" - badger "github.com/dgraph-io/badger/v4" ds "github.com/ipfs/go-datastore" + badger "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/require" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" diff --git a/datastore/errors.go b/datastore/errors.go index b08e6d5e70..b248ce6db8 100644 --- a/datastore/errors.go +++ b/datastore/errors.go @@ -14,6 +14,10 @@ import ( "github.com/sourcenetwork/defradb/errors" ) +const ( + errInvalidStoredValue string = "invalid stored value" +) + // Errors returnable from this package. // // This list is incomplete and undefined errors may also be returned. @@ -26,3 +30,9 @@ var ( // ErrNotFound is an error returned when a block is not found. ErrNotFound = errors.New("blockstore: block not found") ) + +// NewErrInvalidStoredValue returns a new error indicating that the stored +// value in the database is invalid. 
+func NewErrInvalidStoredValue(inner error) error { + return errors.Wrap(errInvalidStoredValue, inner) +} diff --git a/datastore/mocks/txn.go b/datastore/mocks/txn.go index dd3fb60def..0dc71cb46f 100644 --- a/datastore/mocks/txn.go +++ b/datastore/mocks/txn.go @@ -366,6 +366,49 @@ func (_c *Txn_OnSuccess_Call) RunAndReturn(run func(func())) *Txn_OnSuccess_Call return _c } +// Peerstore provides a mock function with given fields: +func (_m *Txn) Peerstore() datastore.DSBatching { + ret := _m.Called() + + var r0 datastore.DSBatching + if rf, ok := ret.Get(0).(func() datastore.DSBatching); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(datastore.DSBatching) + } + } + + return r0 +} + +// Txn_Peerstore_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Peerstore' +type Txn_Peerstore_Call struct { + *mock.Call +} + +// Peerstore is a helper method to define mock.On call +func (_e *Txn_Expecter) Peerstore() *Txn_Peerstore_Call { + return &Txn_Peerstore_Call{Call: _e.mock.On("Peerstore")} +} + +func (_c *Txn_Peerstore_Call) Run(run func()) *Txn_Peerstore_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Txn_Peerstore_Call) Return(_a0 datastore.DSBatching) *Txn_Peerstore_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Txn_Peerstore_Call) RunAndReturn(run func() datastore.DSBatching) *Txn_Peerstore_Call { + _c.Call.Return(run) + return _c +} + // Rootstore provides a mock function with given fields: func (_m *Txn) Rootstore() datastore.DSReaderWriter { ret := _m.Called() diff --git a/datastore/multi.go b/datastore/multi.go index 28e095b7f7..47015e4581 100644 --- a/datastore/multi.go +++ b/datastore/multi.go @@ -12,21 +12,24 @@ package datastore import ( ds "github.com/ipfs/go-datastore" + "github.com/ipfs/go-datastore/namespace" ) var ( // Individual Store Keys - rootStoreKey = ds.NewKey("/db") - systemStoreKey = rootStoreKey.ChildString("/system") - dataStoreKey = rootStoreKey.ChildString("/data") - headStoreKey = rootStoreKey.ChildString("/heads") - blockStoreKey = rootStoreKey.ChildString("/blocks") + rootStoreKey = ds.NewKey("db") + systemStoreKey = rootStoreKey.ChildString("system") + dataStoreKey = rootStoreKey.ChildString("data") + headStoreKey = rootStoreKey.ChildString("heads") + blockStoreKey = rootStoreKey.ChildString("blocks") + peerStoreKey = rootStoreKey.ChildString("ps") ) type multistore struct { root DSReaderWriter data DSReaderWriter head DSReaderWriter + peer DSBatching system DSReaderWriter // block DSReaderWriter dag DAGStore @@ -35,14 +38,15 @@ type multistore struct { var _ MultiStore = (*multistore)(nil) // MultiStoreFrom creates a MultiStore from a root datastore. -func MultiStoreFrom(rootstore DSReaderWriter) MultiStore { - block := prefix(rootstore, blockStoreKey) +func MultiStoreFrom(rootstore ds.Datastore) MultiStore { + rootRW := AsDSReaderWriter(rootstore) ms := &multistore{ - root: rootstore, - data: prefix(rootstore, dataStoreKey), - head: prefix(rootstore, headStoreKey), - system: prefix(rootstore, systemStoreKey), - dag: NewDAGStore(block), + root: rootRW, + data: prefix(rootRW, dataStoreKey), + head: prefix(rootRW, headStoreKey), + peer: namespace.Wrap(rootstore, peerStoreKey), + system: prefix(rootRW, systemStoreKey), + dag: NewDAGStore(prefix(rootRW, blockStoreKey)), } return ms @@ -58,6 +62,11 @@ func (ms multistore) Headstore() DSReaderWriter { return ms.head } +// Peerstore implements MultiStore. 
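// namespace.Wrap is used for the peer store because it preserves the full
// ds.Batching interface, which the prefix() helper used for the other stores
// does not expose. A minimal sketch of the same wiring (not part of this
// diff; the /db/ps path mirrors the multistore keys above):

import (
	ds "github.com/ipfs/go-datastore"
	"github.com/ipfs/go-datastore/namespace"
)

func newPeerstore(root ds.Datastore) ds.Batching {
	// Everything written through this store lands under /db/ps.
	return namespace.Wrap(root, ds.NewKey("db").ChildString("ps"))
}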
+func (ms multistore) Peerstore() DSBatching { + return ms.peer +} + // DAGstore implements MultiStore. func (ms multistore) DAGstore() DAGStore { return ms.dag diff --git a/datastore/prefix_query.go b/datastore/prefix_query.go new file mode 100644 index 0000000000..7150aebe48 --- /dev/null +++ b/datastore/prefix_query.go @@ -0,0 +1,81 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package datastore + +import ( + "context" + "encoding/json" + + ds "github.com/ipfs/go-datastore" + + "github.com/ipfs/go-datastore/query" +) + +// DeserializePrefix deserializes all elements with the given prefix from the given storage. +// It returns the keys and their corresponding elements. +func DeserializePrefix[T any]( + ctx context.Context, + prefix string, + store ds.Read, +) ([]string, []T, error) { + q, err := store.Query(ctx, query.Query{Prefix: prefix}) + if err != nil { + return nil, nil, err + } + + keys := make([]string, 0) + elements := make([]T, 0) + for res := range q.Next() { + if res.Error != nil { + _ = q.Close() + return nil, nil, res.Error + } + + var element T + err = json.Unmarshal(res.Value, &element) + if err != nil { + _ = q.Close() + return nil, nil, NewErrInvalidStoredValue(err) + } + keys = append(keys, res.Key) + elements = append(elements, element) + } + if err := q.Close(); err != nil { + return nil, nil, err + } + return keys, elements, nil +} + +// FetchKeysForPrefix fetches all keys with the given prefix from the given storage. 
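// A usage sketch for DeserializePrefix above (not part of this diff): load
// every stored schema description in one call. The prefix value is
// illustrative and matches the SCHEMA_VERSION constant from core/key.go.

import (
	"context"

	ds "github.com/ipfs/go-datastore"

	"github.com/sourcenetwork/defradb/client"
)

func loadSchemaVersions(ctx context.Context, store ds.Read) ([]client.SchemaDescription, error) {
	// Keys are discarded here; callers that need them get both slices back.
	_, schemas, err := DeserializePrefix[client.SchemaDescription](ctx, "/schema/version/v", store)
	return schemas, err
}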
+func FetchKeysForPrefix( + ctx context.Context, + prefix string, + store ds.Read, +) ([]ds.Key, error) { + q, err := store.Query(ctx, query.Query{Prefix: prefix}) + if err != nil { + return nil, err + } + + keys := make([]ds.Key, 0) + for res := range q.Next() { + if res.Error != nil { + _ = q.Close() + return nil, res.Error + } + keys = append(keys, ds.NewKey(res.Key)) + } + if err = q.Close(); err != nil { + return nil, err + } + + return keys, nil +} diff --git a/datastore/store.go b/datastore/store.go index 4b299ebc61..759eef01db 100644 --- a/datastore/store.go +++ b/datastore/store.go @@ -40,6 +40,11 @@ type MultiStore interface { // under the /head namespace Headstore() DSReaderWriter + // Peerstore is a wrapped root DSReaderWriter + // as a ds.Batching, embedded into a DSBatching + // under the /ps namespace + Peerstore() DSBatching + // DAGstore is a wrapped root DSReaderWriter // as a Blockstore, embedded into a DAGStore // under the /blocks namespace @@ -67,3 +72,8 @@ type DSReaderWriter interface { type DAGStore interface { blockstore.Blockstore } + +// DSBatching wraps the Batching interface from go-datastore +type DSBatching interface { + ds.Batching +} diff --git a/datastore/txn.go b/datastore/txn.go index d0fa3d2f35..acc7a53193 100644 --- a/datastore/txn.go +++ b/datastore/txn.go @@ -66,7 +66,7 @@ func NewTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, id uint64, reado if err != nil { return nil, err } - multistore := MultiStoreFrom(rootTxn) + multistore := MultiStoreFrom(ShimTxnStore{rootTxn}) return &txn{ rootTxn, multistore, @@ -82,8 +82,7 @@ func NewTxnFrom(ctx context.Context, rootstore ds.TxnDatastore, id uint64, reado return nil, err } - root := AsDSReaderWriter(ShimTxnStore{rootTxn}) - multistore := MultiStoreFrom(root) + multistore := MultiStoreFrom(ShimTxnStore{rootTxn}) return &txn{ rootTxn, multistore, diff --git a/datastore/txn_test.go b/datastore/txn_test.go index e46dbdae8f..f5170146d6 100644 --- a/datastore/txn_test.go +++ b/datastore/txn_test.go @@ -14,8 +14,8 @@ import ( "context" "testing" - badger "github.com/dgraph-io/badger/v4" ds "github.com/ipfs/go-datastore" + badger "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/require" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" diff --git a/datastore/wrappedstore_test.go b/datastore/wrappedstore_test.go index 200af3e3f7..a4ca65f4af 100644 --- a/datastore/wrappedstore_test.go +++ b/datastore/wrappedstore_test.go @@ -14,9 +14,9 @@ import ( "context" "testing" - badger "github.com/dgraph-io/badger/v4" ds "github.com/ipfs/go-datastore" "github.com/ipfs/go-datastore/query" + badger "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/require" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" diff --git a/db/backup.go b/db/backup.go index 89925a6c53..e2958d1c96 100644 --- a/db/backup.go +++ b/db/backup.go @@ -171,7 +171,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client if firstCol { firstCol = false } else { - // add collection seperator + // add collection separator err = writeString(f, ",", ",\n", config.Pretty) if err != nil { return err @@ -199,7 +199,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client if firstDoc { firstDoc = false } else { - // add document seperator + // add document separator err = writeString(f, ",", ",\n", config.Pretty) if err != nil { return err @@ -212,7 +212,7 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client
isSelfReference := false refFieldName := "" - // replace any foreing key if it needs to be changed + // replace any foreign key if it needs to be changed for _, field := range col.Schema().Fields { switch field.Kind { case client.FieldKind_FOREIGN_OBJECT: @@ -250,9 +250,6 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client return err } - // Temporary until https://github.com/sourcenetwork/defradb/issues/1681 is resolved. - ensureIntIsInt(foreignCol.Schema().Fields, oldForeignDoc) - delete(oldForeignDoc, "_key") if foreignDoc.Key().String() == foreignDocKey.String() { delete(oldForeignDoc, field.Name+request.RelatedObjectID) @@ -289,9 +286,6 @@ func (db *db) basicExport(ctx context.Context, txn datastore.Txn, config *client return err } - // Temporary until https://github.com/sourcenetwork/defradb/issues/1681 is resolved. - ensureIntIsInt(col.Schema().Fields, docM) - delete(docM, "_key") if isSelfReference { delete(docM, refFieldName) @@ -374,19 +368,3 @@ func writeString(f *os.File, normal, pretty string, isPretty bool) error { } return nil } - -// Temporary until https://github.com/sourcenetwork/defradb/issues/1681 is resolved. -func ensureIntIsInt(fields []client.FieldDescription, docMap map[string]any) { - for _, field := range fields { - if field.Kind == client.FieldKind_INT { - if val, ok := docMap[field.Name]; ok { - switch v := val.(type) { - case uint64: - docMap[field.Name] = int(v) - case int64: - docMap[field.Name] = int(v) - } - } - } - } -} diff --git a/db/backup_test.go b/db/backup_test.go index 2f89f54a07..f0e7a6e338 100644 --- a/db/backup_test.go +++ b/db/backup_test.go @@ -25,7 +25,7 @@ func TestBasicExport_WithNormalFormatting_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -87,7 +87,7 @@ func TestBasicExport_WithPrettyFormatting_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -149,7 +149,7 @@ func TestBasicExport_WithSingleCollection_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -211,7 +211,7 @@ func TestBasicExport_WithMultipleCollectionsAndUpdate_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -285,7 +285,7 @@ func TestBasicExport_EnsureFileOverwrite_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -355,7 +355,7 @@ func TestBasicImport_WithMultipleCollectionsAndObjects_NoError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -414,7 +414,7 @@ func TestBasicImport_WithJSONArray_ReturnError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -449,7 +449,7 @@ func 
TestBasicImport_WithObjectCollection_ReturnError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -484,7 +484,7 @@ func TestBasicImport_WithInvalidFilepath_ReturnError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String @@ -520,7 +520,7 @@ func TestBasicImport_WithInvalidCollection_ReturnError(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) require.NoError(t, err) - defer db.Close(ctx) + defer db.Close() _, err = db.AddSchema(ctx, `type User { name: String diff --git a/db/base/collection_keys.go b/db/base/collection_keys.go index 0276975630..6a762ff180 100644 --- a/db/base/collection_keys.go +++ b/db/base/collection_keys.go @@ -34,6 +34,7 @@ func MakeDocKey(col client.CollectionDescription, docKey string) core.DataStoreK func MakePrimaryIndexKeyForCRDT( c client.CollectionDescription, + schema client.SchemaDescription, ctype client.CType, key core.DataStoreKey, fieldName string, @@ -42,19 +43,12 @@ func MakePrimaryIndexKeyForCRDT( case client.COMPOSITE: return MakeCollectionKey(c).WithInstanceInfo(key).WithFieldId(core.COMPOSITE_NAMESPACE), nil case client.LWW_REGISTER: - fieldKey := getFieldKey(c, key, fieldName) - return MakeCollectionKey(c).WithInstanceInfo(fieldKey), nil - } - return core.DataStoreKey{}, ErrInvalidCrdtType -} + field, ok := c.GetFieldByName(fieldName, &schema) + if !ok { + return core.DataStoreKey{}, client.NewErrFieldNotExist(fieldName) + } -func getFieldKey( - c client.CollectionDescription, - key core.DataStoreKey, - fieldName string, -) core.DataStoreKey { - if !c.Schema.IsEmpty() { - return key.WithFieldId(fmt.Sprint(c.Schema.GetFieldKey(fieldName))) + return MakeCollectionKey(c).WithInstanceInfo(key).WithFieldId(fmt.Sprint(field.ID)), nil } - return key.WithFieldId(fieldName) + return core.DataStoreKey{}, ErrInvalidCrdtType } diff --git a/db/collection.go b/db/collection.go index a9d3f5c403..b4586be89b 100644 --- a/db/collection.go +++ b/db/collection.go @@ -13,7 +13,6 @@ package db import ( "bytes" "context" - "encoding/json" "fmt" "strconv" "strings" @@ -28,14 +27,13 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" - ccid "github.com/sourcenetwork/defradb/core/cid" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/db/description" "github.com/sourcenetwork/defradb/db/fetcher" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" "github.com/sourcenetwork/defradb/lens" - "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/merkle/crdt" ) @@ -54,11 +52,7 @@ type collection struct { // of the operation in question. txn immutable.Option[datastore.Txn] - colID uint32 - - schemaID string - - desc client.CollectionDescription + def client.CollectionDefinition indexes []CollectionIndex fetcherFactory func() fetcher.Fetcher @@ -71,43 +65,11 @@ type collection struct { // CollectionOptions object. 
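// The key helper above now resolves the field ID through the schema instead
// of the collection description. An illustrative caller (not part of this
// diff; the field name "name" is a placeholder):

import (
	"github.com/sourcenetwork/defradb/client"
	"github.com/sourcenetwork/defradb/core"
	"github.com/sourcenetwork/defradb/db/base"
)

func fieldCRDTKey(
	desc client.CollectionDescription,
	schema client.SchemaDescription,
	docKey core.DataStoreKey,
) (core.DataStoreKey, error) {
	// For LWW registers the field is resolved to its numeric field ID, which
	// is appended to the datastore key; unknown fields now error cleanly.
	return base.MakePrimaryIndexKeyForCRDT(desc, schema, client.LWW_REGISTER, docKey, "name")
}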
// NewCollection returns a pointer to a newly instantiated DB Collection -func (db *db) newCollection(desc client.CollectionDescription) (*collection, error) { - if desc.Name == "" { - return nil, client.NewErrUninitializeProperty("Collection", "Name") - } - - if len(desc.Schema.Fields) == 0 { - return nil, client.NewErrUninitializeProperty("Collection", "Fields") - } - - docKeyField := desc.Schema.Fields[0] - if docKeyField.Kind != client.FieldKind_DocKey || docKeyField.Name != request.KeyFieldName { - return nil, ErrSchemaFirstFieldDocKey - } - - for i, field := range desc.Schema.Fields { - if field.Name == "" { - return nil, client.NewErrUninitializeProperty("Collection.Schema", "Name") - } - if field.Kind == client.FieldKind_None { - return nil, client.NewErrUninitializeProperty("Collection.Schema", "FieldKind") - } - if (field.Kind != client.FieldKind_DocKey && !field.IsObject()) && - field.Typ == client.NONE_CRDT { - return nil, client.NewErrUninitializeProperty("Collection.Schema", "CRDT type") - } - desc.Schema.Fields[i].ID = client.FieldID(i) - } - +func (db *db) newCollection(desc client.CollectionDescription, schema client.SchemaDescription) *collection { return &collection{ - db: db, - desc: client.CollectionDescription{ - ID: desc.ID, - Name: desc.Name, - Schema: desc.Schema, - }, - colID: desc.ID, - }, nil + db: db, + def: client.CollectionDefinition{Description: desc, Schema: schema}, + } } // newFetcher returns a new fetcher instance for this collection. @@ -130,11 +92,12 @@ func (c *collection) newFetcher() fetcher.Fetcher { func (db *db) createCollection( ctx context.Context, txn datastore.Txn, - desc client.CollectionDescription, + def client.CollectionDefinition, ) (client.Collection, error) { - // check if collection by this name exists - collectionKey := core.NewCollectionKey(desc.Name) - exists, err := txn.Systemstore().Has(ctx, collectionKey.ToDS()) + schema := def.Schema + desc := def.Description + + exists, err := description.HasCollectionByName(ctx, txn, desc.Name) if err != nil { return nil, err } @@ -151,104 +114,64 @@ func (db *db) createCollection( return nil, err } desc.ID = uint32(colID) - col, err := db.newCollection(desc) - if err != nil { - return nil, err - } - - // Local elements such as secondary indexes should be excluded - // from the (global) schemaId. - globalSchemaBuf, err := json.Marshal(struct { - Name string - Schema client.SchemaDescription - }{col.desc.Name, col.desc.Schema}) - if err != nil { - return nil, err - } - - // add a reference to this DB by desc hash - cid, err := ccid.NewSHA256CidV1(globalSchemaBuf) - if err != nil { - return nil, err - } - schemaID := cid.String() - col.schemaID = schemaID - - // For new schemas the initial version id will match the schema id - schemaVersionID := schemaID - - col.desc.Schema.VersionID = schemaVersionID - col.desc.Schema.SchemaID = schemaID - - // buffer must include all the ids, as it is saved and loaded from the store later. - buf, err := json.Marshal(col.desc) - if err != nil { - return nil, err - } - collectionSchemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionID) - // Whilst the schemaVersionKey is global, the data persisted at the key's location - // is local to the node (the global only elements are not useful beyond key generation).
- err = txn.Systemstore().Put(ctx, collectionSchemaVersionKey.ToDS(), buf) + schema, err = description.CreateSchemaVersion(ctx, txn, schema) if err != nil { return nil, err } + desc.SchemaVersionID = schema.VersionID - collectionSchemaKey := core.NewCollectionSchemaKey(schemaID) - err = txn.Systemstore().Put(ctx, collectionSchemaKey.ToDS(), []byte(schemaVersionID)) + desc, err = description.SaveCollection(ctx, txn, desc) if err != nil { return nil, err } - err = txn.Systemstore().Put(ctx, collectionKey.ToDS(), []byte(schemaVersionID)) - if err != nil { - return nil, err - } - - log.Debug( - ctx, - "Created collection", - logging.NewKV("Name", col.Name()), - logging.NewKV("SchemaID", col.SchemaID()), - ) - + col := db.newCollection(desc, schema) for _, index := range desc.Indexes { if _, err := col.createIndex(ctx, txn, index); err != nil { return nil, err } } - return col, nil + + return db.getCollectionByName(ctx, txn, desc.Name) } -// updateCollection updates the persisted collection description matching the name of the given +// updateSchema updates the persisted schema description matching the name of the given // description, to the values in the given description. // -// It will validate the given description using [ValidateUpdateCollectionTxn] before updating it. +// It will validate the given description using [validateUpdateSchema] before updating it. // -// The collection (including the schema version ID) will only be updated if any changes have actually +// The schema (including the schema version ID) will only be updated if any changes have actually // been made; if the given description matches the current persisted description then no changes will be // applied. -func (db *db) updateCollection( +func (db *db) updateSchema( ctx context.Context, txn datastore.Txn, - existingDescriptionsByName map[string]client.CollectionDescription, - proposedDescriptionsByName map[string]client.CollectionDescription, - desc client.CollectionDescription, -) (client.Collection, error) { - hasChanged, err := db.validateUpdateCollection(ctx, txn, existingDescriptionsByName, proposedDescriptionsByName, desc) + existingSchemaByName map[string]client.SchemaDescription, + proposedDescriptionsByName map[string]client.SchemaDescription, + schema client.SchemaDescription, + setAsDefaultVersion bool, +) error { + hasChanged, err := db.validateUpdateSchema( + ctx, + txn, + existingSchemaByName, + proposedDescriptionsByName, + schema, + ) if err != nil { - return nil, err + return err } if !hasChanged { - return db.getCollectionByName(ctx, txn, desc.Name) + return nil } - for _, field := range desc.Schema.Fields { + for _, field := range schema.Fields { if field.RelationType.IsSet(client.Relation_Type_ONE) { idFieldName := field.Name + "_id" - if _, ok := desc.Schema.GetField(idFieldName); !ok { - desc.Schema.Fields = append(desc.Schema.Fields, client.FieldDescription{ + if _, ok := schema.GetField(idFieldName); !ok { + schema.Fields = append(schema.Fields, client.FieldDescription{ Name: idFieldName, Kind: client.FieldKind_DocKey, RelationType: client.Relation_Type_INTERNAL_ID, @@ -258,82 +181,57 @@ func (db *db) updateCollection( } } - for i, field := range desc.Schema.Fields { - if field.ID == client.FieldID(0) { - // This is not wonderful and will probably break when we add the ability - // to delete fields, however it is good enough for now and matches the - // create behaviour.
- field.ID = client.FieldID(i) - desc.Schema.Fields[i] = field - } - + for i, field := range schema.Fields { if field.Typ == client.NONE_CRDT { // If no CRDT Type has been provided, default to LWW_REGISTER. field.Typ = client.LWW_REGISTER - desc.Schema.Fields[i] = field + schema.Fields[i] = field } } - globalSchemaBuf, err := json.Marshal(desc.Schema) + previousVersionID := schema.VersionID + schema, err = description.CreateSchemaVersion(ctx, txn, schema) if err != nil { - return nil, err - } - - cid, err := ccid.NewSHA256CidV1(globalSchemaBuf) - if err != nil { - return nil, err - } - previousSchemaVersionID := desc.Schema.VersionID - schemaVersionID := cid.String() - desc.Schema.VersionID = schemaVersionID - - buf, err := json.Marshal(desc) - if err != nil { - return nil, err + return err } - collectionSchemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionID) - // Whilst the schemaVersionKey is global, the data persisted at the key's location - // is local to the node (the global only elements are not useful beyond key generation). - err = txn.Systemstore().Put(ctx, collectionSchemaVersionKey.ToDS(), buf) - if err != nil { - return nil, err - } + if setAsDefaultVersion { + cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, previousVersionID) + if err != nil { + return err + } - collectionSchemaKey := core.NewCollectionSchemaKey(desc.Schema.SchemaID) - err = txn.Systemstore().Put(ctx, collectionSchemaKey.ToDS(), []byte(schemaVersionID)) - if err != nil { - return nil, err - } + for _, col := range cols { + col.SchemaVersionID = schema.VersionID - collectionKey := core.NewCollectionKey(desc.Name) - err = txn.Systemstore().Put(ctx, collectionKey.ToDS(), []byte(schemaVersionID)) - if err != nil { - return nil, err - } + col, err = description.SaveCollection(ctx, txn, col) + if err != nil { + return err + } - schemaVersionHistoryKey := core.NewSchemaHistoryKey(desc.Schema.SchemaID, previousSchemaVersionID) - err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(schemaVersionID)) - if err != nil { - return nil, err + err = db.setDefaultSchemaVersionExplicit(ctx, txn, col.Name, schema.VersionID) + if err != nil { + return err + } + } } - return db.getCollectionByName(ctx, txn, desc.Name) + return nil } -// validateUpdateCollection validates that the given collection description is a valid update. +// validateUpdateSchema validates that the given schema description is a valid update. // // Will return true if the given description differs from the current persisted state of the -// collection. Will return an error if it fails validation. -func (db *db) validateUpdateCollection( +// schema. Will return an error if it fails validation. 
+func (db *db) validateUpdateSchema( ctx context.Context, txn datastore.Txn, - existingDescriptionsByName map[string]client.CollectionDescription, - proposedDescriptionsByName map[string]client.CollectionDescription, - proposedDesc client.CollectionDescription, + existingDescriptionsByName map[string]client.SchemaDescription, + proposedDescriptionsByName map[string]client.SchemaDescription, + proposedDesc client.SchemaDescription, ) (bool, error) { if proposedDesc.Name == "" { - return false, ErrCollectionNameEmpty + return false, ErrSchemaNameEmpty } existingDesc, collectionExists := existingDescriptionsByName[proposedDesc.Name] @@ -341,54 +239,49 @@ func (db *db) validateUpdateCollection( return false, NewErrAddCollectionWithPatch(proposedDesc.Name) } - if proposedDesc.ID != existingDesc.ID { - return false, NewErrCollectionIDDoesntMatch(proposedDesc.Name, existingDesc.ID, proposedDesc.ID) - } - - if proposedDesc.Schema.SchemaID != existingDesc.Schema.SchemaID { - return false, NewErrSchemaIDDoesntMatch( + if proposedDesc.Root != existingDesc.Root { + return false, NewErrSchemaRootDoesntMatch( proposedDesc.Name, - existingDesc.Schema.SchemaID, - proposedDesc.Schema.SchemaID, + existingDesc.Root, + proposedDesc.Root, ) } - if proposedDesc.Schema.Name != existingDesc.Schema.Name { + if proposedDesc.Name != existingDesc.Name { // There is actually little reason to not support this atm besides controlling the surface area // of the new feature. Changing this should not break anything, but it should be tested first. - return false, NewErrCannotModifySchemaName(existingDesc.Schema.Name, proposedDesc.Schema.Name) + return false, NewErrCannotModifySchemaName(existingDesc.Name, proposedDesc.Name) } - if proposedDesc.Schema.VersionID != "" && proposedDesc.Schema.VersionID != existingDesc.Schema.VersionID { + if proposedDesc.VersionID != "" && proposedDesc.VersionID != existingDesc.VersionID { // If users specify this it will be overwritten, an error is preferred to quietly ignoring it.
return false, ErrCannotSetVersionID } - hasChangedFields, err := validateUpdateCollectionFields(proposedDescriptionsByName, existingDesc, proposedDesc) + hasChangedFields, err := validateUpdateSchemaFields(proposedDescriptionsByName, existingDesc, proposedDesc) if err != nil { return hasChangedFields, err } - hasChangedIndexes, err := validateUpdateCollectionIndexes(existingDesc.Indexes, proposedDesc.Indexes) - return hasChangedFields || hasChangedIndexes, err + return hasChangedFields, err } -func validateUpdateCollectionFields( - descriptionsByName map[string]client.CollectionDescription, - existingDesc client.CollectionDescription, - proposedDesc client.CollectionDescription, +func validateUpdateSchemaFields( + descriptionsByName map[string]client.SchemaDescription, + existingDesc client.SchemaDescription, + proposedDesc client.SchemaDescription, ) (bool, error) { hasChanged := false existingFieldsByID := map[client.FieldID]client.FieldDescription{} existingFieldIndexesByName := map[string]int{} - for i, field := range existingDesc.Schema.Fields { + for i, field := range existingDesc.Fields { existingFieldIndexesByName[field.Name] = i existingFieldsByID[field.ID] = field } newFieldNames := map[string]struct{}{} newFieldIds := map[client.FieldID]struct{}{} - for proposedIndex, proposedField := range proposedDesc.Schema.Fields { + for proposedIndex, proposedField := range proposedDesc.Fields { var existingField client.FieldDescription var fieldAlreadyExists bool if proposedField.ID != client.FieldID(0) || @@ -456,7 +349,7 @@ func validateUpdateCollectionFields( if proposedField.Kind == client.FieldKind_FOREIGN_OBJECT { idFieldName := proposedField.Name + request.RelatedObjectID - idField, idFieldFound := proposedDesc.Schema.GetField(idFieldName) + idField, idFieldFound := proposedDesc.GetField(idFieldName) if idFieldFound { if idField.Kind != client.FieldKind_DocKey { return false, NewErrRelationalFieldIDInvalidType(idField.Name, client.FieldKind_DocKey, idField.Kind) @@ -478,7 +371,7 @@ func validateUpdateCollectionFields( var relatedFieldFound bool var relatedField client.FieldDescription - for _, field := range relatedDesc.Schema.Fields { + for _, field := range relatedDesc.Fields { if field.RelationName == proposedField.RelationName && !field.RelationType.IsSet(client.Relation_Type_INTERNAL_ID) && !(relatedDesc.Name == proposedDesc.Name && field.Name == proposedField.Name) { @@ -552,7 +445,7 @@ func validateUpdateCollectionFields( newFieldIds[proposedField.ID] = struct{}{} } - for _, field := range existingDesc.Schema.Fields { + for _, field := range existingDesc.Fields { if _, stillExists := newFieldIds[field.ID]; !stillExists { return false, NewErrCannotDeleteField(field.Name, field.ID) } @@ -560,74 +453,96 @@ func validateUpdateCollectionFields( return hasChanged, nil } -func validateUpdateCollectionIndexes( - existingIndexes []client.IndexDescription, - proposedIndexes []client.IndexDescription, -) (bool, error) { - existingNameToIndex := map[string]client.IndexDescription{} - for _, index := range existingIndexes { - existingNameToIndex[index.Name] = index - } - for _, proposedIndex := range proposedIndexes { - if existingIndex, exists := existingNameToIndex[proposedIndex.Name]; exists { - if len(existingIndex.Fields) != len(proposedIndex.Fields) { - return false, ErrCanNotChangeIndexWithPatch - } - for i := range existingIndex.Fields { - if existingIndex.Fields[i] != proposedIndex.Fields[i] { - return false, ErrCanNotChangeIndexWithPatch - } - } - delete(existingNameToIndex, 
proposedIndex.Name) - } else { - return false, NewErrCannotAddIndexWithPatch(proposedIndex.Name) - } +func (db *db) setDefaultSchemaVersion( + ctx context.Context, + txn datastore.Txn, + schemaVersionID string, +) error { + if schemaVersionID == "" { + return ErrSchemaVersionIDEmpty } - if len(existingNameToIndex) > 0 { - for _, index := range existingNameToIndex { - return false, NewErrCannotDropIndexWithPatch(index.Name) + + schema, err := description.GetSchemaVersion(ctx, txn, schemaVersionID) + if err != nil { + return err + } + + colDescs, err := description.GetCollectionsBySchemaRoot(ctx, txn, schema.Root) + if err != nil { + return err + } + + for _, col := range colDescs { + col.SchemaVersionID = schemaVersionID + col, err = description.SaveCollection(ctx, txn, col) + if err != nil { + return err } } - return false, nil + + cols, err := db.getAllCollections(ctx, txn) + if err != nil { + return err + } + + definitions := make([]client.CollectionDefinition, len(cols)) + for i, col := range cols { + definitions[i] = col.Definition() + } + + return db.parser.SetSchema(ctx, txn, definitions) } -// getCollectionByVersionId returns the [*collection] at the given [schemaVersionId] version. -// -// Will return an error if the given key is empty, or not found. -func (db *db) getCollectionByVersionID( +func (db *db) setDefaultSchemaVersionExplicit( ctx context.Context, txn datastore.Txn, - schemaVersionId string, -) (*collection, error) { - if schemaVersionId == "" { - return nil, ErrSchemaVersionIDEmpty + collectionName string, + schemaVersionID string, +) error { + if schemaVersionID == "" { + return ErrSchemaVersionIDEmpty } - key := core.NewCollectionSchemaVersionKey(schemaVersionId) - buf, err := txn.Systemstore().Get(ctx, key.ToDS()) + col, err := description.GetCollectionByName(ctx, txn, collectionName) if err != nil { - return nil, err + return err } - var desc client.CollectionDescription - err = json.Unmarshal(buf, &desc) + col.SchemaVersionID = schemaVersionID + + _, err = description.SaveCollection(ctx, txn, col) + return err +} + +// getCollectionsByVersionId returns the [*collection]s at the given [schemaVersionId] version. +// +// Will return an error if the given key is empty, or if none are found. +func (db *db) getCollectionsByVersionID( + ctx context.Context, + txn datastore.Txn, + schemaVersionId string, +) ([]*collection, error) { + cols, err := description.GetCollectionsBySchemaVersionID(ctx, txn, schemaVersionId) if err != nil { return nil, err } - col := &collection{ - db: db, - desc: desc, - colID: desc.ID, - schemaID: desc.Schema.SchemaID, - } + collections := make([]*collection, len(cols)) + for i, col := range cols { + schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID) + if err != nil { + return nil, err + } - err = col.loadIndexes(ctx, txn) - if err != nil { - return nil, err + collections[i] = db.newCollection(col, schema) + + err = collections[i].loadIndexes(ctx, txn) + if err != nil { + return nil, err + } } - return col, nil + return collections, nil } // getCollectionByName returns an existing collection within the database. 
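A hedged sketch of consuming the new plural lookup (assumed usage; printCollections is a hypothetical helper, and fmt is assumed to be imported): one schema version can now back several collections, so callers iterate the result instead of expecting a single value:

	func (db *db) printCollections(ctx context.Context, txn datastore.Txn, versionID string) error {
		cols, err := db.getCollectionsByVersionID(ctx, txn, versionID)
		if err != nil {
			return err
		}
		for _, col := range cols {
			// each *collection is returned with its schema and indexes already loaded
			fmt.Printf("%s (schema version %s)\n", col.Name(), col.Schema().VersionID)
		}
		return nil
	}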
@@ -636,67 +551,83 @@ func (db *db) getCollectionByName(ctx context.Context, txn datastore.Txn, name s return nil, ErrCollectionNameEmpty } - key := core.NewCollectionKey(name) - buf, err := txn.Systemstore().Get(ctx, key.ToDS()) + col, err := description.GetCollectionByName(ctx, txn, name) if err != nil { return nil, err } - schemaVersionId := string(buf) - return db.getCollectionByVersionID(ctx, txn, schemaVersionId) + schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID) + if err != nil { + return nil, err + } + + collection := db.newCollection(col, schema) + err = collection.loadIndexes(ctx, txn) + if err != nil { + return nil, err + } + + return collection, nil } -// getCollectionBySchemaID returns an existing collection using the schema hash ID. -func (db *db) getCollectionBySchemaID( +// getCollectionsBySchemaRoot returns all existing collections using the schema root. +func (db *db) getCollectionsBySchemaRoot( ctx context.Context, txn datastore.Txn, - schemaID string, -) (client.Collection, error) { - if schemaID == "" { - return nil, ErrSchemaIDEmpty + schemaRoot string, +) ([]client.Collection, error) { + if schemaRoot == "" { + return nil, ErrSchemaRootEmpty } - key := core.NewCollectionSchemaKey(schemaID) - buf, err := txn.Systemstore().Get(ctx, key.ToDS()) + cols, err := description.GetCollectionsBySchemaRoot(ctx, txn, schemaRoot) if err != nil { return nil, err } - schemaVersionId := string(buf) - return db.getCollectionByVersionID(ctx, txn, schemaVersionId) + collections := make([]client.Collection, len(cols)) + for i, col := range cols { + schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID) + if err != nil { + return nil, err + } + + collection := db.newCollection(col, schema) + collections[i] = collection + + err = collection.loadIndexes(ctx, txn) + if err != nil { + return nil, err + } + } + + return collections, nil } // getAllCollections gets all the currently defined collections. func (db *db) getAllCollections(ctx context.Context, txn datastore.Txn) ([]client.Collection, error) { - // create collection system prefix query - prefix := core.NewCollectionKey("") - q, err := txn.Systemstore().Query(ctx, query.Query{ - Prefix: prefix.ToString(), - }) + cols, err := description.GetCollections(ctx, txn) if err != nil { - return nil, NewErrFailedToCreateCollectionQuery(err) + return nil, err } - defer func() { - if err := q.Close(); err != nil { - log.ErrorE(ctx, "Failed to close collection query", err) - } - }() - cols := make([]client.Collection, 0) - for res := range q.Next() { - if res.Error != nil { + collections := make([]client.Collection, len(cols)) + for i, col := range cols { + schema, err := description.GetSchemaVersion(ctx, txn, col.SchemaVersionID) + if err != nil { return nil, err } - schemaVersionId := string(res.Value) - col, err := db.getCollectionByVersionID(ctx, txn, schemaVersionId) + collection := db.newCollection(col, schema) + collections[i] = collection + + err = collection.loadIndexes(ctx, txn) if err != nil { - return nil, NewErrFailedToGetCollection(schemaVersionId, err) + return nil, err } - cols = append(cols, col) } - return cols, nil + return collections, nil } // GetAllDocKeys returns all the document keys that exist in the collection. 
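The lookups above all follow the same hydration pattern: fetch the collection description, fetch the schema version it points at, then pair the two. A self-contained sketch of that step (assumptions: an open datastore.Txn and the db/description package introduced later in this diff; loadDefinitions is a hypothetical helper):

	func loadDefinitions(ctx context.Context, txn datastore.Txn) ([]client.CollectionDefinition, error) {
		colDescs, err := description.GetCollections(ctx, txn)
		if err != nil {
			return nil, err
		}
		defs := make([]client.CollectionDefinition, len(colDescs))
		for i, colDesc := range colDescs {
			schema, err := description.GetSchemaVersion(ctx, txn, colDesc.SchemaVersionID)
			if err != nil {
				return nil, err
			}
			// a definition pairs the node-local description with its global schema
			defs[i] = client.CollectionDefinition{Description: colDesc, Schema: schema}
		}
		return defs, nil
	}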
@@ -717,7 +648,7 @@ func (c *collection) getAllDocKeysChan( txn datastore.Txn, ) (<-chan client.DocKeysResult, error) { prefix := core.PrimaryDataStoreKey{ // empty path for all keys prefix - CollectionId: fmt.Sprint(c.colID), + CollectionId: fmt.Sprint(c.ID()), } q, err := txn.Datastore().Query(ctx, query.Query{ Prefix: prefix.ToString(), @@ -772,26 +703,30 @@ func (c *collection) getAllDocKeysChan( // Description returns the client.CollectionDescription. func (c *collection) Description() client.CollectionDescription { - return c.desc + return c.Definition().Description } // Name returns the collection name. func (c *collection) Name() string { - return c.desc.Name + return c.Description().Name } // Schema returns the Schema of the collection. func (c *collection) Schema() client.SchemaDescription { - return c.desc.Schema + return c.Definition().Schema } // ID returns the ID of the collection. func (c *collection) ID() uint32 { - return c.colID + return c.Description().ID +} + +func (c *collection) SchemaRoot() string { + return c.Schema().Root } -func (c *collection) SchemaID() string { - return c.schemaID +func (c *collection) Definition() client.CollectionDefinition { + return c.def } // WithTxn returns a new instance of the collection, with a transaction @@ -800,9 +735,7 @@ func (c *collection) WithTxn(txn datastore.Txn) client.Collection { return &collection{ db: c.db, txn: immutable.Some(txn), - desc: c.desc, - colID: c.colID, - schemaID: c.schemaID, + def: c.def, indexes: c.indexes, fetcherFactory: c.fetcherFactory, } @@ -860,7 +793,7 @@ func (c *collection) getKeysFromDoc( func (c *collection) create(ctx context.Context, txn datastore.Txn, doc *client.Document) error { // This has to be done before dockey verification happens in the next step. 
- if err := doc.RemapAliasFieldsAndDockey(c.desc.Schema.Fields); err != nil { + if err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields); err != nil { return err } @@ -1016,7 +949,7 @@ func (c *collection) save( return cid.Undef, client.NewErrFieldNotExist(k) } - fieldDescription, valid := c.desc.Schema.GetField(k) + fieldDescription, valid := c.Schema().GetField(k) if !valid { return cid.Undef, client.NewErrFieldNotExist(k) } @@ -1025,7 +958,7 @@ func (c *collection) save( if isSecondaryRelationID { primaryId := val.Value().(string) - err = c.patchPrimaryDoc(ctx, txn, relationFieldDescription, primaryKey.DocKey, primaryId) + err = c.patchPrimaryDoc(ctx, txn, c.Name(), relationFieldDescription, primaryKey.DocKey, primaryId) if err != nil { return cid.Undef, err } @@ -1085,11 +1018,11 @@ func (c *collection) save( func() { c.db.events.Updates.Value().Publish( events.Update{ - DocKey: doc.Key().String(), - Cid: headNode.Cid(), - SchemaID: c.schemaID, - Block: headNode, - Priority: priority, + DocKey: doc.Key().String(), + Cid: headNode.Cid(), + SchemaRoot: c.Schema().Root, + Block: headNode, + Priority: priority, }, ) }, @@ -1118,7 +1051,7 @@ func (c *collection) validateOneToOneLinkDoesntAlreadyExist( return nil } - objFieldDescription, ok := c.desc.Schema.GetField(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID)) + objFieldDescription, ok := c.Schema().GetField(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID)) if !ok { return client.NewErrFieldNotExist(strings.TrimSuffix(fieldDescription.Name, request.RelatedObjectID)) } @@ -1286,10 +1219,17 @@ func (c *collection) saveValueToMerkleCRDT( if err != nil { return nil, 0, err } - field, _ := c.Description().GetFieldByID(client.FieldID(fieldID)) + + schema := c.Schema() + + field, ok := c.Description().GetFieldByID(client.FieldID(fieldID), &schema) + if !ok { + return nil, 0, client.NewErrFieldIndexNotExist(fieldID) + } + merkleCRDT, err := c.db.crdtFactory.InstanceWithStores( txn, - core.NewCollectionSchemaVersionKey(c.Schema().VersionID), + core.NewCollectionSchemaVersionKey(schema.VersionID, c.ID()), c.db.events.Updates, ctype, key, @@ -1300,7 +1240,6 @@ func (c *collection) saveValueToMerkleCRDT( } var bytes []byte - var ok bool // parse args if len(args) != 1 { return nil, 0, ErrUnknownCRDTArgument @@ -1315,7 +1254,7 @@ func (c *collection) saveValueToMerkleCRDT( key = key.WithFieldId(core.COMPOSITE_NAMESPACE) merkleCRDT, err := c.db.crdtFactory.InstanceWithStores( txn, - core.NewCollectionSchemaVersionKey(c.Schema().VersionID), + core.NewCollectionSchemaVersionKey(c.Schema().VersionID, c.ID()), c.db.events.Updates, ctype, key, @@ -1383,14 +1322,14 @@ func (c *collection) commitImplicitTxn(ctx context.Context, txn datastore.Txn) e func (c *collection) getPrimaryKeyFromDocKey(docKey client.DocKey) core.PrimaryDataStoreKey { return core.PrimaryDataStoreKey{ - CollectionId: fmt.Sprint(c.colID), + CollectionId: fmt.Sprint(c.ID()), DocKey: docKey.String(), } } func (c *collection) getDSKeyFromDockey(docKey client.DocKey) core.DataStoreKey { return core.DataStoreKey{ - CollectionID: fmt.Sprint(c.colID), + CollectionID: fmt.Sprint(c.ID()), DocKey: docKey.String(), InstanceType: core.ValueKey, } @@ -1412,7 +1351,7 @@ func (c *collection) tryGetFieldKey(key core.PrimaryDataStoreKey, fieldName stri // tryGetSchemaFieldID returns the FieldID of the given fieldName. // Will return false if the field is not found. 
func (c *collection) tryGetSchemaFieldID(fieldName string) (uint32, bool) { - for _, field := range c.desc.Schema.Fields { + for _, field := range c.Schema().Fields { if field.Name == fieldName { if field.IsObject() || field.IsObjectArray() { // We do not wish to match navigational properties, only diff --git a/db/collection_delete.go b/db/collection_delete.go index 480656849f..7f6a968a97 100644 --- a/db/collection_delete.go +++ b/db/collection_delete.go @@ -207,7 +207,7 @@ func (c *collection) deleteWithFilter( // Convert from string to client.DocKey. key := core.PrimaryDataStoreKey{ - CollectionId: fmt.Sprint(c.colID), + CollectionId: fmt.Sprint(c.ID()), DocKey: docKey, } @@ -279,11 +279,11 @@ func (c *collection) applyDelete( func() { c.db.events.Updates.Value().Publish( events.Update{ - DocKey: key.DocKey, - Cid: headNode.Cid(), - SchemaID: c.schemaID, - Block: headNode, - Priority: priority, + DocKey: key.DocKey, + Cid: headNode.Cid(), + SchemaRoot: c.Schema().Root, + Block: headNode, + Priority: priority, }, ) }, diff --git a/db/collection_get.go b/db/collection_get.go index 8262ff44ba..d210072793 100644 --- a/db/collection_get.go +++ b/db/collection_get.go @@ -53,16 +53,15 @@ func (c *collection) get( ) (*client.Document, error) { // create a new document fetcher df := c.newFetcher() - desc := &c.desc // initialize it with the primary index - err := df.Init(ctx, txn, &c.desc, fields, nil, nil, false, showDeleted) + err := df.Init(ctx, txn, c, fields, nil, nil, false, showDeleted) if err != nil { _ = df.Close() return nil, err } // construct target key for DocKey - targetKey := base.MakeDocKey(*desc, key.DocKey) + targetKey := base.MakeDocKey(c.Description(), key.DocKey) // run the doc fetcher err = df.Start(ctx, core.NewSpans(core.NewSpan(targetKey, targetKey.PrefixEnd()))) if err != nil { diff --git a/db/collection_index.go b/db/collection_index.go index 791817a0a3..278586902b 100644 --- a/db/collection_index.go +++ b/db/collection_index.go @@ -17,9 +17,6 @@ import ( "strconv" "strings" - ds "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/query" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" @@ -63,7 +60,7 @@ func (db *db) getAllIndexes( ) (map[client.CollectionName][]client.IndexDescription, error) { prefix := core.NewCollectionIndexKey("", "") - deserializedIndexes, err := deserializePrefix[client.IndexDescription](ctx, + keys, indexDescriptions, err := datastore.DeserializePrefix[client.IndexDescription](ctx, prefix.ToString(), txn.Systemstore()) if err != nil { @@ -72,12 +69,15 @@ func (db *db) getAllIndexes( indexes := make(map[client.CollectionName][]client.IndexDescription) - for _, indexRec := range deserializedIndexes { - indexKey, err := core.NewCollectionIndexKeyFromString(indexRec.key) + for i := range keys { + indexKey, err := core.NewCollectionIndexKeyFromString(keys[i]) if err != nil { return nil, NewErrInvalidStoredIndexKey(indexKey.ToString()) } - indexes[indexKey.CollectionName] = append(indexes[indexKey.CollectionName], indexRec.element) + indexes[indexKey.CollectionName] = append( + indexes[indexKey.CollectionName], + indexDescriptions[i], + ) } return indexes, nil @@ -89,16 +89,12 @@ func (db *db) fetchCollectionIndexDescriptions( colName string, ) ([]client.IndexDescription, error) { prefix := core.NewCollectionIndexKey(colName, "") - deserializedIndexes, err := deserializePrefix[client.IndexDescription](ctx, + _, indexDescriptions, err := 
datastore.DeserializePrefix[client.IndexDescription](ctx, prefix.ToString(), txn.Systemstore()) if err != nil { return nil, err } - indexes := make([]client.IndexDescription, 0, len(deserializedIndexes)) - for _, indexRec := range deserializedIndexes { - indexes = append(indexes, indexRec.element) - } - return indexes, nil + return indexDescriptions, nil } func (c *collection) indexNewDoc(ctx context.Context, txn datastore.Txn, doc *client.Document) error { @@ -115,27 +111,6 @@ func (c *collection) indexNewDoc(ctx context.Context, txn datastore.Txn, doc *cl return nil } -// collectIndexedFields returns all fields that are indexed by all collection indexes. -func (c *collection) collectIndexedFields() []client.FieldDescription { - fieldsMap := make(map[string]client.FieldDescription) - for _, index := range c.indexes { - for _, field := range index.Description().Fields { - for i := range c.desc.Schema.Fields { - colField := c.desc.Schema.Fields[i] - if field.Name == colField.Name { - fieldsMap[field.Name] = colField - break - } - } - } - } - fields := make([]client.FieldDescription, 0, len(fieldsMap)) - for _, field := range fieldsMap { - fields = append(fields, field) - } - return fields -} - func (c *collection) updateIndexedDoc( ctx context.Context, txn datastore.Txn, @@ -145,7 +120,14 @@ func (c *collection) updateIndexedDoc( if err != nil { return err } - oldDoc, err := c.get(ctx, txn, c.getPrimaryKeyFromDocKey(doc.Key()), c.collectIndexedFields(), false) + desc := c.Description() + schema := c.Schema() + oldDoc, err := c.get( + ctx, + txn, + c.getPrimaryKeyFromDocKey(doc.Key()), desc.CollectIndexedFields(&schema), + false, + ) if err != nil { return err } @@ -236,7 +218,7 @@ func (c *collection) createIndex( if err != nil { return nil, err } - c.desc.Indexes = append(c.desc.Indexes, colIndex.Description()) + c.def.Description.Indexes = append(c.def.Description.Indexes, colIndex.Description()) c.indexes = append(c.indexes, colIndex) err = c.indexExistingDocs(ctx, txn, colIndex) if err != nil { @@ -252,12 +234,12 @@ func (c *collection) iterateAllDocs( exec func(doc *client.Document) error, ) error { df := c.newFetcher() - err := df.Init(ctx, txn, &c.desc, fields, nil, nil, false, false) + err := df.Init(ctx, txn, c, fields, nil, nil, false, false) if err != nil { _ = df.Close() return err } - start := base.MakeCollectionKey(c.desc) + start := base.MakeCollectionKey(c.Description()) spans := core.NewSpans(core.NewSpan(start, start.PrefixEnd())) err = df.Start(ctx, spans) @@ -297,8 +279,8 @@ func (c *collection) indexExistingDocs( ) error { fields := make([]client.FieldDescription, 0, 1) for _, field := range index.Description().Fields { - for i := range c.desc.Schema.Fields { - colField := c.desc.Schema.Fields[i] + for i := range c.Schema().Fields { + colField := c.Schema().Fields[i] if field.Name == colField.Name { fields = append(fields, colField) break @@ -352,9 +334,9 @@ func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName return NewErrIndexWithNameDoesNotExists(indexName) } - for i := range c.desc.Indexes { - if c.desc.Indexes[i].Name == indexName { - c.desc.Indexes = append(c.desc.Indexes[:i], c.desc.Indexes[i+1:]...) + for i := range c.Description().Indexes { + if c.Description().Indexes[i].Name == indexName { + c.def.Description.Indexes = append(c.Description().Indexes[:i], c.Description().Indexes[i+1:]...) 
break } } @@ -370,7 +352,7 @@ func (c *collection) dropIndex(ctx context.Context, txn datastore.Txn, indexName func (c *collection) dropAllIndexes(ctx context.Context, txn datastore.Txn) error { prefix := core.NewCollectionIndexKey(c.Name(), "") - keys, err := fetchKeysForPrefix(ctx, prefix.ToString(), txn.Systemstore()) + keys, err := datastore.FetchKeysForPrefix(ctx, prefix.ToString(), txn.Systemstore()) if err != nil { return err } @@ -398,7 +380,7 @@ func (c *collection) loadIndexes(ctx context.Context, txn datastore.Txn) error { } colIndexes = append(colIndexes, index) } - c.desc.Indexes = indexDescriptions + c.def.Description.Indexes = indexDescriptions c.indexes = colIndexes return nil } @@ -415,14 +397,14 @@ func (c *collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, if err != nil { return nil, err } - return c.desc.Indexes, nil + return c.Description().Indexes, nil } func (c *collection) checkExistingFields( ctx context.Context, fields []client.IndexedFieldDescription, ) error { - collectionFields := c.Description().Schema.Fields + collectionFields := c.Schema().Fields for _, field := range fields { found := false for _, colField := range collectionFields { @@ -510,39 +492,3 @@ func generateIndexName(col client.Collection, fields []client.IndexedFieldDescri } return sb.String() } - -type deserializedElement[T any] struct { - key string - element T -} - -func deserializePrefix[T any]( - ctx context.Context, - prefix string, - storage ds.Read, -) ([]deserializedElement[T], error) { - q, err := storage.Query(ctx, query.Query{Prefix: prefix}) - if err != nil { - return nil, NewErrFailedToCreateCollectionQuery(err) - } - - elements := make([]deserializedElement[T], 0) - for res := range q.Next() { - if res.Error != nil { - _ = q.Close() - return nil, res.Error - } - - var element T - err = json.Unmarshal(res.Value, &element) - if err != nil { - _ = q.Close() - return nil, NewErrInvalidStoredIndex(err) - } - elements = append(elements, deserializedElement[T]{key: res.Key, element: element}) - } - if err := q.Close(); err != nil { - return nil, err - } - return elements, nil -} diff --git a/db/collection_test.go b/db/collection_test.go index e3686504d3..dd57cb285b 100644 --- a/db/collection_test.go +++ b/db/collection_test.go @@ -12,320 +12,11 @@ package db import ( "context" - "reflect" "testing" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" ) -func newTestCollectionWithSchema( - t *testing.T, - ctx context.Context, - db *implicitTxnDB, -) (client.Collection, error) { - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: "Age", - Kind: client.FieldKind_INT, - Typ: client.LWW_REGISTER, - }, - { - Name: "Weight", - Kind: client.FieldKind_FLOAT, - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - col, err := db.createCollection(ctx, txn, desc) - if err != nil { - return col, err - } - - return col, txn.Commit(ctx) -} - -func TestNewCollection_ReturnsError_GivenNoSchema(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - _, err = db.createCollection(ctx, txn, client.CollectionDescription{ - 
Name: "test", - }) - assert.Error(t, err) -} - -func TestNewCollectionWithSchema(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - schema := col.Schema() - desc := col.Description() - - assert.True(t, reflect.DeepEqual(schema, desc.Schema)) - assert.Equal(t, "users", col.Name()) - assert.Equal(t, uint32(1), col.ID()) - assert.False(t, reflect.DeepEqual(schema, client.SchemaDescription{})) - assert.Equal(t, 4, len(schema.Fields)) - - for i := 0; i < 4; i++ { - assert.Equal(t, client.FieldID(i), schema.Fields[i].ID) - } -} - -func TestNewCollectionReturnsErrorGivenDuplicateSchema(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - _, err = newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - _, err = newTestCollectionWithSchema(t, ctx, db) - assert.Errorf(t, err, "collection already exists") -} - -func TestNewCollectionReturnsErrorGivenNoFields(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{}, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. Host: Collection, PropertyName: Fields", - ) -} - -func TestNewCollectionReturnsErrorGivenNoName(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{}, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. 
Host: Collection, PropertyName: Name", - ) -} - -func TestNewCollectionReturnsErrorGivenNoKeyField(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "Name", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError(t, err, "collection schema first field must be a DocKey") -} - -func TestNewCollectionReturnsErrorGivenKeyFieldIsNotFirstField(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "Name", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError(t, err, "collection schema first field must be a DocKey") -} - -func TestNewCollectionReturnsErrorGivenFieldWithNoName(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. Host: Collection.Schema, PropertyName: Name", - ) -} - -func TestNewCollectionReturnsErrorGivenFieldWithNoKind(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. Host: Collection.Schema, PropertyName: FieldKind", - ) -} - -func TestNewCollectionReturnsErrorGivenFieldWithNoType(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - desc := client.CollectionDescription{ - Name: "users", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - Kind: client.FieldKind_STRING, - }, - }, - }, - } - - _, err = db.createCollection(ctx, txn, desc) - assert.EqualError( - t, - err, - "invalid state, required property is uninitialized. 
Host: Collection.Schema, PropertyName: CRDT type", - ) -} - -func TestGetCollectionByName(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - _, err = newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - col, err := db.GetCollectionByName(ctx, "users") - assert.NoError(t, err) - - schema := col.Schema() - desc := col.Description() - - assert.True(t, reflect.DeepEqual(schema, desc.Schema)) - assert.Equal(t, "users", col.Name()) - assert.Equal(t, uint32(1), col.ID()) - assert.False(t, reflect.DeepEqual(schema, client.SchemaDescription{})) - assert.Equal(t, 4, len(schema.Fields)) - - for i := 0; i < 4; i++ { - assert.Equal(t, client.FieldID(i), schema.Fields[i].ID) - } -} - func TestGetCollectionByNameReturnsErrorGivenNonExistantCollection(t *testing.T) { ctx := context.Background() db, err := newMemoryDB(ctx) diff --git a/db/collection_update.go b/db/collection_update.go index 1a15482935..c68902db44 100644 --- a/db/collection_update.go +++ b/db/collection_update.go @@ -303,13 +303,13 @@ func (c *collection) applyMergeToDoc( }) for mfield, mval := range mergeMap { - fd, isValidField := c.desc.Schema.GetField(mfield) + fd, isValidField := c.Schema().GetField(mfield) if !isValidField { return client.NewErrFieldNotExist(mfield) } if fd.Kind == client.FieldKind_FOREIGN_OBJECT { - fd, isValidField = c.desc.Schema.GetField(mfield + request.RelatedObjectID) + fd, isValidField = c.Schema().GetField(mfield + request.RelatedObjectID) if !isValidField { return client.NewErrFieldNotExist(mfield) } @@ -335,7 +335,7 @@ func (c *collection) isSecondaryIDField(fieldDesc client.FieldDescription) (clie return client.FieldDescription{}, false } - relationFieldDescription, valid := c.Description().Schema.GetField( + relationFieldDescription, valid := c.Schema().GetField( strings.TrimSuffix(fieldDesc.Name, request.RelatedObjectID), ) return relationFieldDescription, valid && !relationFieldDescription.IsPrimaryRelation() @@ -350,6 +350,7 @@ func (c *collection) isSecondaryIDField(fieldDesc client.FieldDescription) (clie func (c *collection) patchPrimaryDoc( ctx context.Context, txn datastore.Txn, + secondaryCollectionName string, relationFieldDescription client.FieldDescription, docKey string, fieldValue string, @@ -364,13 +365,19 @@ func (c *collection) patchPrimaryDoc( return err } primaryCol = primaryCol.WithTxn(txn) + primarySchema := primaryCol.Schema() - primaryField, ok := primaryCol.Description().GetRelation(relationFieldDescription.RelationName) + primaryField, ok := primaryCol.Description().GetFieldByRelation( + relationFieldDescription.RelationName, + secondaryCollectionName, + relationFieldDescription.Name, + &primarySchema, + ) if !ok { return client.NewErrFieldNotExist(relationFieldDescription.RelationName) } - primaryIDField, ok := primaryCol.Description().Schema.GetField(primaryField.Name + request.RelatedObjectID) + primaryIDField, ok := primaryCol.Schema().GetField(primaryField.Name + request.RelatedObjectID) if !ok { return client.NewErrFieldNotExist(primaryField.Name + request.RelatedObjectID) } diff --git a/db/db.go b/db/db.go index 0bc9a361c3..f2f59ecdaf 100644 --- a/db/db.go +++ b/db/db.go @@ -113,8 +113,7 @@ func NewDB(ctx context.Context, rootstore datastore.RootStore, options ...Option func newDB(ctx context.Context, rootstore datastore.RootStore, options ...Option) (*implicitTxnDB, error) { log.Debug(ctx, "Loading: internal datastores") - root := datastore.AsDSReaderWriter(rootstore) - multistore := 
datastore.MultiStoreFrom(root) + multistore := datastore.MultiStoreFrom(rootstore) crdtFactory := crdt.DefaultFactory.WithStores(multistore) parser, err := graphql.NewParser() @@ -183,8 +182,9 @@ func (db *db) Blockstore() blockstore.Blockstore { return db.multistore.DAGstore() } -func (db *db) systemstore() datastore.DSReaderWriter { - return db.multistore.Systemstore() +// Peerstore returns the internal store used for persisting peer-related data. +func (db *db) Peerstore() datastore.DSBatching { + return db.multistore.Peerstore() } func (db *db) LensRegistry() client.LensRegistry { @@ -266,17 +266,17 @@ func (db *db) PrintDump(ctx context.Context) error { // Close is called when we are shutting down the database. // This is the place for any last minute cleanup or releasing of resources (i.e.: Badger instance). -func (db *db) Close(ctx context.Context) { - log.Info(ctx, "Closing DefraDB process...") +func (db *db) Close() { + log.Info(context.Background(), "Closing DefraDB process...") if db.events.Updates.HasValue() { db.events.Updates.Value().Close() } err := db.rootstore.Close() if err != nil { - log.ErrorE(ctx, "Failure closing running process", err) + log.ErrorE(context.Background(), "Failure closing running process", err) } - log.Info(ctx, "Successfully closed running process") + log.Info(context.Background(), "Successfully closed running process") } func printStore(ctx context.Context, store datastore.DSReaderWriter) error { diff --git a/db/db_test.go b/db/db_test.go index c1a9648f36..237a1f21ed 100644 --- a/db/db_test.go +++ b/db/db_test.go @@ -14,15 +14,9 @@ import ( "context" "testing" - badger "github.com/dgraph-io/badger/v4" - dag "github.com/ipfs/boxo/ipld/merkledag" - "github.com/stretchr/testify/assert" + badger "github.com/sourcenetwork/badger/v4" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - corecrdt "github.com/sourcenetwork/defradb/core/crdt" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/merkle/clock" ) func newMemoryDB(ctx context.Context) (*implicitTxnDB, error) { @@ -48,369 +42,3 @@ func TestNewDB(t *testing.T) { t.Error(err) } } - -func TestDBSaveSimpleDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - if err != nil { - t.Error(err) - return - } - - err = col.Save(ctx, doc) - if err != nil { - t.Error(err) - } - - // value check - name, err := doc.Get("Name") - assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - weight, err := doc.Get("Weight") - assert.NoError(t, err) - - assert.Equal(t, "John", name) - assert.Equal(t, int64(21), age) - assert.Equal(t, 154.1, weight) - - _, err = doc.Get("DoesntExist") - assert.Error(t, err) - - // db.printDebugDB() -} - -func TestDBUpdateDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - if err != nil { - t.Error(err) - return - } - - err = col.Save(ctx, doc) - if err != nil { - t.Error(err) - } - - // update fields - doc.Set("Name", "Pete") - doc.Delete("Weight") -
weightField := doc.Fields()["Weight"] - weightVal, _ := doc.GetValueWithField(weightField) - assert.True(t, weightVal.IsDelete()) - - err = col.Update(ctx, doc) - if err != nil { - t.Error(err) - } - - // value check - name, err := doc.Get("Name") - assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - weight, err := doc.Get("Weight") - assert.NoError(t, err) - - assert.Equal(t, "Pete", name) - assert.Equal(t, int64(21), age) - assert.Nil(t, weight) -} - -func TestDBUpdateNonExistingDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - if err != nil { - t.Error(err) - return - } - - err = col.Update(ctx, doc) - assert.Error(t, err) -} - -func TestDBUpdateExistingDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - testJSONObj = []byte(`{ - "_key": "bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d", - "Name": "Pete", - "Age": 31 - }`) - - doc, err = client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Update(ctx, doc) - assert.NoError(t, err) - - // value check - name, err := doc.Get("Name") - assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - // weight, err := doc.Get("Weight") - // assert.NoError(t, err) - - assert.Equal(t, "Pete", name) - assert.Equal(t, int64(31), age) -} - -func TestDBGetDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - key, err := client.NewDocKeyFromString("bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d") - assert.NoError(t, err) - doc, err = col.Get(ctx, key, false) - assert.NoError(t, err) - - // value check - name, err := doc.Get("Name") - assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - weight, err := doc.Get("Weight") - assert.NoError(t, err) - - assert.Equal(t, "John", name) - assert.Equal( - t, - uint64(21), - age, - ) // note: uint is used here, because the CBOR implementation converts all positive ints to uint64 - assert.Equal(t, 154.1, weight) -} - -func TestDBGetNotFoundDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - key, err := client.NewDocKeyFromString("bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d") - assert.NoError(t, err) - _, err = col.Get(ctx, key, false) - assert.EqualError(t, err, client.ErrDocumentNotFound.Error()) -} - -func TestDBDeleteDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - 
"Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - key, err := client.NewDocKeyFromString("bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d") - assert.NoError(t, err) - deleted, err := col.Delete(ctx, key) - assert.NoError(t, err) - assert.True(t, deleted) -} - -func TestDBDeleteNotFoundDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - key, err := client.NewDocKeyFromString("bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d") - assert.NoError(t, err) - deleted, err := col.Delete(ctx, key) - assert.EqualError(t, err, client.ErrDocumentNotFound.Error()) - assert.False(t, deleted) -} - -func TestDocumentMerkleDAG(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21, - "Weight": 154.1 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - clk := clock.NewMerkleClock( - db.multistore.Headstore(), - nil, - core.HeadStoreKey{}.WithDocKey( - "bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d", - ).WithFieldId( - "Name", - ), - nil, - ) - heads := clk.(*clock.MerkleClock).Heads() - cids, _, err := heads.List(ctx) - assert.NoError(t, err) - - reg := corecrdt.LWWRegister{} - for _, c := range cids { - b, errGet := db.Blockstore().Get(ctx, c) - assert.NoError(t, errGet) - - nd, errDecode := dag.DecodeProtobuf(b.RawData()) - assert.NoError(t, errDecode) - - _, errMarshal := nd.MarshalJSON() - assert.NoError(t, errMarshal) - - _, errDeltaDecode := reg.DeltaDecode(nd) - assert.NoError(t, errDeltaDecode) - } - - testJSONObj = []byte(`{ - "_key": "bae-09cd7539-9b86-5661-90f6-14fbf6c1a14d", - "Name": "Pete", - "Age": 31 - }`) - - doc, err = client.NewDocFromJSON(testJSONObj) - assert.NoError(t, err) - - err = col.Update(ctx, doc) - assert.NoError(t, err) - - heads = clk.(*clock.MerkleClock).Heads() - cids, _, err = heads.List(ctx) - assert.NoError(t, err) - - for _, c := range cids { - b, err := db.Blockstore().Get(ctx, c) - assert.NoError(t, err) - - nd, err := dag.DecodeProtobuf(b.RawData()) - assert.NoError(t, err) - - _, err = nd.MarshalJSON() - assert.NoError(t, err) - - _, err = reg.DeltaDecode(nd) - assert.NoError(t, err) - } -} - -// collection with schema -func TestDBSchemaSaveSimpleDocument(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - testJSONObj := []byte(`{ - "Name": "John", - "Age": 21 - }`) - - doc, err := client.NewDocFromJSON(testJSONObj) - if err != nil { - t.Error(err) - return - } - - err = col.Save(ctx, doc) - assert.NoError(t, err) - - // value check - name, err := doc.Get("Name") - assert.NoError(t, err) - age, err := doc.Get("Age") - assert.NoError(t, err) - - assert.Equal(t, "John", name) - assert.Equal(t, int64(21), age) - - err = db.PrintDump(ctx) - assert.Nil(t, err) -} diff --git a/db/description/collection.go b/db/description/collection.go new file mode 100644 index 0000000000..a334ec6384 --- /dev/null +++ b/db/description/collection.go @@ -0,0 +1,230 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this 
software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package description + +import ( + "context" + "encoding/json" + + "github.com/ipfs/go-datastore/query" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" +) + +// SaveCollection saves the given collection to the system store overwriting any +// pre-existing values. +func SaveCollection( + ctx context.Context, + txn datastore.Txn, + desc client.CollectionDescription, +) (client.CollectionDescription, error) { + buf, err := json.Marshal(desc) + if err != nil { + return client.CollectionDescription{}, err + } + + key := core.NewCollectionKey(desc.ID) + err = txn.Systemstore().Put(ctx, key.ToDS(), buf) + if err != nil { + return client.CollectionDescription{}, err + } + + idBuf, err := json.Marshal(desc.ID) + if err != nil { + return client.CollectionDescription{}, err + } + + nameKey := core.NewCollectionNameKey(desc.Name) + err = txn.Systemstore().Put(ctx, nameKey.ToDS(), idBuf) + if err != nil { + return client.CollectionDescription{}, err + } + + // The need for this key is temporary, we should replace it with the global collection ID + // https://github.com/sourcenetwork/defradb/issues/1085 + schemaVersionKey := core.NewCollectionSchemaVersionKey(desc.SchemaVersionID, desc.ID) + err = txn.Systemstore().Put(ctx, schemaVersionKey.ToDS(), []byte{}) + if err != nil { + return client.CollectionDescription{}, err + } + + return desc, nil +} + +// GetCollectionByName returns the collection with the given name. +// +// If no collection of that name is found, it will return an error. +func GetCollectionByName( + ctx context.Context, + txn datastore.Txn, + name string, +) (client.CollectionDescription, error) { + nameKey := core.NewCollectionNameKey(name) + idBuf, err := txn.Systemstore().Get(ctx, nameKey.ToDS()) + if err != nil { + return client.CollectionDescription{}, err + } + + var id uint32 + err = json.Unmarshal(idBuf, &id) + if err != nil { + return client.CollectionDescription{}, err + } + + key := core.NewCollectionKey(id) + buf, err := txn.Systemstore().Get(ctx, key.ToDS()) + if err != nil { + return client.CollectionDescription{}, err + } + + var col client.CollectionDescription + err = json.Unmarshal(buf, &col) + if err != nil { + return client.CollectionDescription{}, err + } + + return col, nil +} + +// GetCollectionsBySchemaVersionID returns all collections that use the given +// schemaVersionID. +// +// If no collections are found an empty set will be returned. 
+func GetCollectionsBySchemaVersionID( + ctx context.Context, + txn datastore.Txn, + schemaVersionID string, +) ([]client.CollectionDescription, error) { + schemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionID, 0) + + schemaVersionQuery, err := txn.Systemstore().Query(ctx, query.Query{ + Prefix: schemaVersionKey.ToString(), + KeysOnly: true, + }) + if err != nil { + return nil, NewErrFailedToCreateCollectionQuery(err) + } + + colIDs := make([]uint32, 0) + for res := range schemaVersionQuery.Next() { + if res.Error != nil { + if err := schemaVersionQuery.Close(); err != nil { + return nil, NewErrFailedToCloseSchemaQuery(err) + } + return nil, err + } + + colSchemaVersionKey, err := core.NewCollectionSchemaVersionKeyFromString(string(res.Key)) + if err != nil { + if err := schemaVersionQuery.Close(); err != nil { + return nil, NewErrFailedToCloseSchemaQuery(err) + } + return nil, err + } + + colIDs = append(colIDs, colSchemaVersionKey.CollectionID) + } + + cols := make([]client.CollectionDescription, len(colIDs)) + for i, colID := range colIDs { + key := core.NewCollectionKey(colID) + buf, err := txn.Systemstore().Get(ctx, key.ToDS()) + if err != nil { + return nil, err + } + + var col client.CollectionDescription + err = json.Unmarshal(buf, &col) + if err != nil { + return nil, err + } + + cols[i] = col + } + + return cols, nil +} + +// GetCollectionsBySchemaRoot returns all collections that use the given +// schema root. +// +// If no collections are found an empty set will be returned. +func GetCollectionsBySchemaRoot( + ctx context.Context, + txn datastore.Txn, + schemaRoot string, +) ([]client.CollectionDescription, error) { + schemaVersionIDs, err := GetSchemaVersionIDs(ctx, txn, schemaRoot) + if err != nil { + return nil, err + } + + cols := []client.CollectionDescription{} + for _, schemaVersionID := range schemaVersionIDs { + versionCols, err := GetCollectionsBySchemaVersionID(ctx, txn, schemaVersionID) + if err != nil { + return nil, err + } + + cols = append(cols, versionCols...) + } + + return cols, nil +} + +// GetCollections returns all collections in the system. +func GetCollections( + ctx context.Context, + txn datastore.Txn, +) ([]client.CollectionDescription, error) { + q, err := txn.Systemstore().Query(ctx, query.Query{ + Prefix: core.COLLECTION, + }) + if err != nil { + return nil, NewErrFailedToCreateCollectionQuery(err) + } + + cols := make([]client.CollectionDescription, 0) + for res := range q.Next() { + if res.Error != nil { + if err := q.Close(); err != nil { + return nil, NewErrFailedToCloseCollectionQuery(err) + } + return nil, err + } + + var col client.CollectionDescription + err = json.Unmarshal(res.Value, &col) + if err != nil { + if err := q.Close(); err != nil { + return nil, NewErrFailedToCloseCollectionQuery(err) + } + return nil, err + } + + cols = append(cols, col) + } + + return cols, nil +} + +// HasCollectionByName returns true if there is a collection of the given name, +// else returns false. +func HasCollectionByName( + ctx context.Context, + txn datastore.Txn, + name string, +) (bool, error) { + nameKey := core.NewCollectionNameKey(name) + return txn.Systemstore().Has(ctx, nameKey.ToDS()) +} diff --git a/db/description/errors.go b/db/description/errors.go new file mode 100644 index 0000000000..72bd63908a --- /dev/null +++ b/db/description/errors.go @@ -0,0 +1,44 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package description
+
+import "github.com/sourcenetwork/defradb/errors"
+
+const (
+	errFailedToCreateSchemaQuery     string = "failed to create schema prefix query"
+	errFailedToCloseSchemaQuery      string = "failed to close schema prefix query"
+	errFailedToCreateCollectionQuery string = "failed to create collection prefix query"
+	errFailedToCloseCollectionQuery  string = "failed to close collection prefix query"
+)
+
+// NewErrFailedToCreateSchemaQuery returns a new error indicating that the
+// schema prefix query failed to be created.
+func NewErrFailedToCreateSchemaQuery(inner error) error {
+	return errors.Wrap(errFailedToCreateSchemaQuery, inner)
+}
+
+// NewErrFailedToCloseSchemaQuery returns a new error indicating that the
+// schema prefix query failed to close.
+func NewErrFailedToCloseSchemaQuery(inner error) error {
+	return errors.Wrap(errFailedToCloseSchemaQuery, inner)
+}
+
+// NewErrFailedToCreateCollectionQuery returns a new error indicating that the
+// collection prefix query failed to be created.
+func NewErrFailedToCreateCollectionQuery(inner error) error {
+	return errors.Wrap(errFailedToCreateCollectionQuery, inner)
+}
+
+// NewErrFailedToCloseCollectionQuery returns a new error indicating that the
+// collection prefix query failed to close.
+func NewErrFailedToCloseCollectionQuery(inner error) error {
+	return errors.Wrap(errFailedToCloseCollectionQuery, inner)
+}
diff --git a/db/description/schema.go b/db/description/schema.go
new file mode 100644
index 0000000000..06b129f3df
--- /dev/null
+++ b/db/description/schema.go
@@ -0,0 +1,285 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package description
+
+import (
+	"context"
+	"encoding/json"
+
+	"github.com/ipfs/go-datastore/query"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/core"
+	"github.com/sourcenetwork/defradb/core/cid"
+	"github.com/sourcenetwork/defradb/datastore"
+)
+
+// CreateSchemaVersion creates and saves to the store a new schema version.
+//
+// If the Root is empty it will be set to the new version ID.
+func CreateSchemaVersion(
+	ctx context.Context,
+	txn datastore.Txn,
+	desc client.SchemaDescription,
+) (client.SchemaDescription, error) {
+	for i := range desc.Fields {
+		// This is not wonderful and will probably break when we add the ability
+		// to delete fields; however, it is good enough for now and matches the
+		// create behaviour.
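+		// For example, a schema with fields (_key, name, points) gets the
+		// positional IDs 0, 1 and 2 (hypothetical field names); removing a
+		// field and re-running this loop would silently renumber every field
+		// after it.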
+		desc.Fields[i].ID = client.FieldID(i)
+	}
+
+	buf, err := json.Marshal(desc)
+	if err != nil {
+		return client.SchemaDescription{}, err
+	}
+
+	scid, err := cid.NewSHA256CidV1(buf)
+	if err != nil {
+		return client.SchemaDescription{}, err
+	}
+	versionID := scid.String()
+	previousSchemaVersionID := desc.VersionID
+	isNew := desc.Root == ""
+
+	desc.VersionID = versionID
+	if isNew {
+		// If this is a new schema, the Root will match the version ID
+		desc.Root = versionID
+	}
+
+	// Rebuild the json buffer to include the newly set ID properties
+	buf, err = json.Marshal(desc)
+	if err != nil {
+		return client.SchemaDescription{}, err
+	}
+
+	key := core.NewSchemaVersionKey(versionID)
+	err = txn.Systemstore().Put(ctx, key.ToDS(), buf)
+	if err != nil {
+		return client.SchemaDescription{}, err
+	}
+
+	if !isNew {
+		// We don't need to add a history key if this is the first version
+		schemaVersionHistoryKey := core.NewSchemaHistoryKey(desc.Root, previousSchemaVersionID)
+		err = txn.Systemstore().Put(ctx, schemaVersionHistoryKey.ToDS(), []byte(desc.VersionID))
+		if err != nil {
+			return client.SchemaDescription{}, err
+		}
+	}
+
+	return desc, nil
+}
+
+// GetSchemaVersion returns the schema description for the schema version with
+// the given ID.
+//
+// Will return an error if it is not found.
+func GetSchemaVersion(
+	ctx context.Context,
+	txn datastore.Txn,
+	versionID string,
+) (client.SchemaDescription, error) {
+	key := core.NewSchemaVersionKey(versionID)
+
+	buf, err := txn.Systemstore().Get(ctx, key.ToDS())
+	if err != nil {
+		return client.SchemaDescription{}, err
+	}
+
+	var desc client.SchemaDescription
+	err = json.Unmarshal(buf, &desc)
+	if err != nil {
+		return client.SchemaDescription{}, err
+	}
+
+	return desc, nil
+}
+
+// GetSchemasByName returns all the schemas with the given name.
+func GetSchemasByName(
+	ctx context.Context,
+	txn datastore.Txn,
+	name string,
+) ([]client.SchemaDescription, error) {
+	allSchemas, err := GetAllSchemas(ctx, txn)
+	if err != nil {
+		return nil, err
+	}
+
+	nameSchemas := []client.SchemaDescription{}
+	for _, schema := range allSchemas {
+		if schema.Name == name {
+			nameSchemas = append(nameSchemas, schema)
+		}
+	}
+
+	return nameSchemas, nil
+}
+
+// GetSchemasByRoot returns all the schemas with the given root.
+func GetSchemasByRoot(
+	ctx context.Context,
+	txn datastore.Txn,
+	root string,
+) ([]client.SchemaDescription, error) {
+	allSchemas, err := GetAllSchemas(ctx, txn)
+	if err != nil {
+		return nil, err
+	}
+
+	rootSchemas := []client.SchemaDescription{}
+	for _, schema := range allSchemas {
+		if schema.Root == root {
+			rootSchemas = append(rootSchemas, schema)
+		}
+	}
+
+	return rootSchemas, nil
+}
+
+// GetSchemas returns the schema descriptions of all the default schema versions
+// in the system.
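+// (These are the versions currently referenced by collections; every stored
+// version, current or historical, is returned by GetAllSchemas below.)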
+func GetSchemas(
+	ctx context.Context,
+	txn datastore.Txn,
+) ([]client.SchemaDescription, error) {
+	cols, err := GetCollections(ctx, txn)
+	if err != nil {
+		return nil, err
+	}
+
+	versionIDs := make([]string, 0)
+	for _, col := range cols {
+		versionIDs = append(versionIDs, col.SchemaVersionID)
+	}
+
+	schemaVersionPrefix := core.NewSchemaVersionKey("")
+	schemaVersionQuery, err := txn.Systemstore().Query(ctx, query.Query{
+		Prefix: schemaVersionPrefix.ToString(),
+	})
+	if err != nil {
+		return nil, NewErrFailedToCreateSchemaQuery(err)
+	}
+
+	descriptions := make([]client.SchemaDescription, 0)
+	for res := range schemaVersionQuery.Next() {
+		if res.Error != nil {
+			if err := schemaVersionQuery.Close(); err != nil {
+				return nil, NewErrFailedToCloseSchemaQuery(err)
+			}
+			return nil, err
+		}
+
+		var desc client.SchemaDescription
+		err = json.Unmarshal(res.Value, &desc)
+		if err != nil {
+			if err := schemaVersionQuery.Close(); err != nil {
+				return nil, NewErrFailedToCloseSchemaQuery(err)
+			}
+			return nil, err
+		}
+
+		for _, versionID := range versionIDs {
+			if desc.VersionID == versionID {
+				descriptions = append(descriptions, desc)
+				break
+			}
+		}
+	}
+
+	if err := schemaVersionQuery.Close(); err != nil {
+		return nil, NewErrFailedToCloseSchemaQuery(err)
+	}
+
+	return descriptions, nil
+}
+
+// GetAllSchemas returns all schema versions in the system.
+func GetAllSchemas(
+	ctx context.Context,
+	txn datastore.Txn,
+) ([]client.SchemaDescription, error) {
+	prefix := core.NewSchemaVersionKey("")
+	q, err := txn.Systemstore().Query(ctx, query.Query{
+		Prefix: prefix.ToString(),
+	})
+	if err != nil {
+		return nil, NewErrFailedToCreateSchemaQuery(err)
+	}
+
+	schemas := make([]client.SchemaDescription, 0)
+	for res := range q.Next() {
+		if res.Error != nil {
+			if err := q.Close(); err != nil {
+				return nil, NewErrFailedToCloseSchemaQuery(err)
+			}
+			return nil, err
+		}
+
+		var desc client.SchemaDescription
+		err = json.Unmarshal(res.Value, &desc)
+		if err != nil {
+			if err := q.Close(); err != nil {
+				return nil, NewErrFailedToCloseSchemaQuery(err)
+			}
+			return nil, err
+		}
+
+		schemas = append(schemas, desc)
+	}
+
+	if err := q.Close(); err != nil {
+		return nil, NewErrFailedToCloseSchemaQuery(err)
+	}
+
+	return schemas, nil
+}
+
+// GetSchemaVersionIDs returns the IDs of all schema versions recorded for the
+// given schema root.
+func GetSchemaVersionIDs(
+	ctx context.Context,
+	txn datastore.Txn,
+	schemaRoot string,
+) ([]string, error) {
+	// Add the schema root as the first version here.
+	// It is not present in the history prefix.
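+	// (The history prefix holds the version-transition keys written by
+	// CreateSchemaVersion; a keys-only scan is enough because the version IDs
+	// are encoded in the keys themselves.)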
+ schemaVersions := []string{schemaRoot} + + prefix := core.NewSchemaHistoryKey(schemaRoot, "") + q, err := txn.Systemstore().Query(ctx, query.Query{ + Prefix: prefix.ToString(), + KeysOnly: true, + }) + if err != nil { + return nil, NewErrFailedToCreateSchemaQuery(err) + } + + for res := range q.Next() { + if res.Error != nil { + if err := q.Close(); err != nil { + return nil, NewErrFailedToCloseSchemaQuery(err) + } + return nil, err + } + + key, err := core.NewSchemaHistoryKeyFromString(res.Key) + if err != nil { + if err := q.Close(); err != nil { + return nil, NewErrFailedToCloseSchemaQuery(err) + } + return nil, err + } + + schemaVersions = append(schemaVersions, key.PreviousSchemaVersionID) + } + + return schemaVersions, nil +} diff --git a/db/errors.go b/db/errors.go index 4a456cd41a..651bcbe42b 100644 --- a/db/errors.go +++ b/db/errors.go @@ -25,7 +25,7 @@ const ( errRemovingP2PCollection string = "cannot remove collection ID" errAddCollectionWithPatch string = "unknown collection, adding collections via patch is not supported" errCollectionIDDoesntMatch string = "CollectionID does not match existing" - errSchemaIDDoesntMatch string = "SchemaID does not match existing" + errSchemaRootDoesntMatch string = "SchemaRoot does not match existing" errCannotModifySchemaName string = "modifying the schema name is not supported" errCannotSetVersionID string = "setting the VersionID is not supported. It is updated automatically" errCannotSetFieldID string = "explicitly setting a field ID value is not supported" @@ -66,6 +66,7 @@ const ( errCanNotDropIndexWithPatch string = "dropping indexes via patch is not supported" errCanNotChangeIndexWithPatch string = "changing indexes via patch is not supported" errIndexWithNameDoesNotExists string = "index with name doesn't exists" + errCorruptedIndex string = "corrupted index. 
Please delete and recreate the index" errInvalidFieldValue string = "invalid field value" errUnsupportedIndexFieldType string = "unsupported index field type" errIndexDescriptionHasNoFields string = "index description has no fields" @@ -111,17 +112,17 @@ var ( ErrDocumentDeleted = errors.New(errDocumentDeleted) ErrUnknownCRDTArgument = errors.New("invalid CRDT arguments") ErrUnknownCRDT = errors.New("unknown crdt") - ErrSchemaFirstFieldDocKey = errors.New("collection schema first field must be a DocKey") ErrCollectionAlreadyExists = errors.New("collection already exists") ErrCollectionNameEmpty = errors.New("collection name can't be empty") - ErrSchemaIDEmpty = errors.New("schema ID can't be empty") + ErrSchemaNameEmpty = errors.New("schema name can't be empty") + ErrSchemaRootEmpty = errors.New("schema root can't be empty") ErrSchemaVersionIDEmpty = errors.New("schema version ID can't be empty") ErrKeyEmpty = errors.New("key cannot be empty") ErrAddingP2PCollection = errors.New(errAddingP2PCollection) ErrRemovingP2PCollection = errors.New(errRemovingP2PCollection) ErrAddCollectionWithPatch = errors.New(errAddCollectionWithPatch) ErrCollectionIDDoesntMatch = errors.New(errCollectionIDDoesntMatch) - ErrSchemaIDDoesntMatch = errors.New(errSchemaIDDoesntMatch) + ErrSchemaRootDoesntMatch = errors.New(errSchemaRootDoesntMatch) ErrCannotModifySchemaName = errors.New(errCannotModifySchemaName) ErrCannotSetVersionID = errors.New(errCannotSetVersionID) ErrCannotSetFieldID = errors.New(errCannotSetFieldID) @@ -147,6 +148,7 @@ var ( ErrIndexFieldMissingName = errors.New(errIndexFieldMissingName) ErrIndexFieldMissingDirection = errors.New(errIndexFieldMissingDirection) ErrIndexSingleFieldWrongDirection = errors.New(errIndexSingleFieldWrongDirection) + ErrCorruptedIndex = errors.New(errCorruptedIndex) ErrCanNotChangeIndexWithPatch = errors.New(errCanNotChangeIndexWithPatch) ErrFieldOrAliasToFieldNotExist = errors.New(errFieldOrAliasToFieldNotExist) ErrCreateFile = errors.New(errCreateFile) @@ -277,12 +279,12 @@ func NewErrCollectionIDDoesntMatch(name string, existingID, proposedID uint32) e ) } -func NewErrSchemaIDDoesntMatch(name, existingID, proposedID string) error { +func NewErrSchemaRootDoesntMatch(name, existingRoot, proposedRoot string) error { return errors.New( - errSchemaIDDoesntMatch, + errSchemaRootDoesntMatch, errors.NewKV("Name", name), - errors.NewKV("ExistingID", existingID), - errors.NewKV("ProposedID", proposedID), + errors.NewKV("ExistingRoot", existingRoot), + errors.NewKV("ProposedRoot", proposedRoot), ) } @@ -478,6 +480,15 @@ func NewErrIndexWithNameDoesNotExists(indexName string) error { ) } +// NewErrCorruptedIndex returns a new error indicating that an index with the +// given name has been corrupted. +func NewErrCorruptedIndex(indexName string) error { + return errors.New( + errCorruptedIndex, + errors.NewKV("Name", indexName), + ) +} + // NewErrCannotAddIndexWithPatch returns a new error indicating that an index cannot be added // with a patch. func NewErrCannotAddIndexWithPatch(proposedName string) error { diff --git a/db/fetcher/encoded_doc.go b/db/fetcher/encoded_doc.go index 3e19eb2218..bc22471465 100644 --- a/db/fetcher/encoded_doc.go +++ b/db/fetcher/encoded_doc.go @@ -131,6 +131,24 @@ func Decode(encdoc EncodedDocument) (*client.Document, error) { return doc, nil } +// MergeProperties merges the properties of the given document into this document. +// Existing fields of the current document are overwritten. 
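+// The document key and schema version ID of the given document, when set,
+// also overwrite those of the current document.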
+func (encdoc *encodedDocument) MergeProperties(other EncodedDocument) { + otherEncDoc, ok := other.(*encodedDocument) + if !ok { + return + } + for field, prop := range otherEncDoc.properties { + encdoc.properties[field] = prop + } + if other.Key() != nil { + encdoc.key = other.Key() + } + if other.SchemaVersionID() != "" { + encdoc.schemaVersionID = other.SchemaVersionID() + } +} + // DecodeToDoc returns a decoded document as a // map of field/value pairs func DecodeToDoc(encdoc EncodedDocument, mapping *core.DocumentMapping, filter bool) (core.Doc, error) { diff --git a/db/fetcher/fetcher.go b/db/fetcher/fetcher.go index 34f05d4f1d..da7a0df1e1 100644 --- a/db/fetcher/fetcher.go +++ b/db/fetcher/fetcher.go @@ -33,18 +33,22 @@ type ExecInfo struct { DocsFetched uint64 // Number of fields fetched. FieldsFetched uint64 + // Number of indexes fetched. + IndexesFetched uint64 } // Add adds the other ExecInfo to the current ExecInfo. func (s *ExecInfo) Add(other ExecInfo) { s.DocsFetched += other.DocsFetched s.FieldsFetched += other.FieldsFetched + s.IndexesFetched += other.IndexesFetched } // Reset resets the ExecInfo. func (s *ExecInfo) Reset() { s.DocsFetched = 0 s.FieldsFetched = 0 + s.IndexesFetched = 0 } // Fetcher is the interface for collecting documents from the underlying data store. @@ -53,7 +57,7 @@ type Fetcher interface { Init( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, @@ -77,7 +81,7 @@ var ( // DocumentFetcher is a utility to incrementally fetch all the documents. type DocumentFetcher struct { - col *client.CollectionDescription + col client.Collection reverse bool deletedDocs bool @@ -133,7 +137,7 @@ type DocumentFetcher struct { func (df *DocumentFetcher) Init( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, @@ -141,9 +145,6 @@ func (df *DocumentFetcher) Init( showDeleted bool, ) error { df.txn = txn - if col.Schema.IsEmpty() { - return client.NewErrUninitializeProperty("DocumentFetcher", "Schema") - } err := df.init(col, fields, filter, docmapper, reverse) if err != nil { @@ -162,7 +163,7 @@ func (df *DocumentFetcher) Init( } func (df *DocumentFetcher) init( - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docMapper *core.DocumentMapping, @@ -198,7 +199,7 @@ func (df *DocumentFetcher) init( // get them all var targetFields []client.FieldDescription if len(fields) == 0 { - targetFields = df.col.Schema.Fields + targetFields = df.col.Schema().Fields } else { targetFields = fields } @@ -209,12 +210,12 @@ func (df *DocumentFetcher) init( if df.filter != nil { conditions := df.filter.ToMap(df.mapping) - parsedfilterFields, err := parser.ParseFilterFieldsForDescription(conditions, df.col.Schema) + parsedfilterFields, err := parser.ParseFilterFieldsForDescription(conditions, df.col.Schema()) if err != nil { return err } df.filterFields = make(map[uint32]client.FieldDescription, len(parsedfilterFields)) - df.filterSet = bitset.New(uint(len(col.Schema.Fields))) + df.filterSet = bitset.New(uint(len(col.Schema().Fields))) for _, field := range parsedfilterFields { df.filterFields[uint32(field.ID)] = field df.filterSet.Set(uint(field.ID)) @@ -249,7 +250,7 @@ func (df *DocumentFetcher) start(ctx context.Context, spans core.Spans, 
withDele df.deletedDocs = withDeleted if !spans.HasValue { // no specified spans so create a prefix scan key for the entire collection - start := base.MakeCollectionKey(*df.col) + start := base.MakeCollectionKey(df.col.Description()) if withDeleted { start = start.WithDeletedFlag() } else { @@ -576,6 +577,8 @@ func (df *DocumentFetcher) fetchNext(ctx context.Context) (EncodedDocument, Exec // keyparts := df.kv.Key.List() // key := keyparts[len(keyparts)-2] + prevExecInfo := df.execInfo + defer func() { df.execInfo.Add(prevExecInfo) }() df.execInfo.Reset() // iterate until we have collected all the necessary kv pairs for the doc // we'll know when were done when either diff --git a/db/fetcher/indexer.go b/db/fetcher/indexer.go new file mode 100644 index 0000000000..a0ee94d0b9 --- /dev/null +++ b/db/fetcher/indexer.go @@ -0,0 +1,165 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package fetcher + +import ( + "context" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/base" + "github.com/sourcenetwork/defradb/planner/mapper" +) + +// IndexFetcher is a fetcher that fetches documents by index. +// It fetches only the indexed field and the rest of the fields are fetched by the internal fetcher. +type IndexFetcher struct { + docFetcher Fetcher + col client.Collection + txn datastore.Txn + indexFilter *mapper.Filter + docFilter *mapper.Filter + doc *encodedDocument + mapping *core.DocumentMapping + indexedField client.FieldDescription + docFields []client.FieldDescription + indexIter indexIterator + indexDataStoreKey core.IndexDataStoreKey + execInfo ExecInfo +} + +var _ Fetcher = (*IndexFetcher)(nil) + +// NewIndexFetcher creates a new IndexFetcher. +func NewIndexFetcher( + docFetcher Fetcher, + indexedFieldDesc client.FieldDescription, + indexFilter *mapper.Filter, +) *IndexFetcher { + return &IndexFetcher{ + docFetcher: docFetcher, + indexedField: indexedFieldDesc, + indexFilter: indexFilter, + } +} + +func (f *IndexFetcher) Init( + ctx context.Context, + txn datastore.Txn, + col client.Collection, + fields []client.FieldDescription, + filter *mapper.Filter, + docMapper *core.DocumentMapping, + reverse bool, + showDeleted bool, +) error { + f.col = col + f.docFilter = filter + f.doc = &encodedDocument{} + f.mapping = docMapper + f.txn = txn + + for _, index := range col.Description().Indexes { + if index.Fields[0].Name == f.indexedField.Name { + f.indexDataStoreKey.IndexID = index.ID + break + } + } + + f.indexDataStoreKey.CollectionID = f.col.ID() + + for i := range fields { + if fields[i].Name == f.indexedField.Name { + f.docFields = append(fields[:i], fields[i+1:]...) 
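+			// The indexed field is deliberately excluded from the wrapped
+			// document fetch; its value is reconstructed from the index key
+			// itself in FetchNext below.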
+ break + } + } + + iter, err := createIndexIterator(f.indexDataStoreKey, f.indexFilter, &f.execInfo) + if err != nil { + return err + } + f.indexIter = iter + + if f.docFetcher != nil && len(f.docFields) > 0 { + err = f.docFetcher.Init(ctx, f.txn, f.col, f.docFields, f.docFilter, f.mapping, false, false) + } + + return err +} + +func (f *IndexFetcher) Start(ctx context.Context, spans core.Spans) error { + err := f.indexIter.Init(ctx, f.txn.Datastore()) + if err != nil { + return err + } + return nil +} + +func (f *IndexFetcher) FetchNext(ctx context.Context) (EncodedDocument, ExecInfo, error) { + totalExecInfo := f.execInfo + defer func() { f.execInfo.Add(totalExecInfo) }() + f.execInfo.Reset() + for { + f.doc.Reset() + + indexKey, hasValue, err := f.indexIter.Next() + if err != nil { + return nil, ExecInfo{}, err + } + + if !hasValue { + return nil, f.execInfo, nil + } + + property := &encProperty{ + Desc: f.indexedField, + Raw: indexKey.FieldValues[0], + } + + f.doc.key = indexKey.FieldValues[1] + f.doc.properties[f.indexedField] = property + f.execInfo.FieldsFetched++ + + if f.docFetcher != nil && len(f.docFields) > 0 { + targetKey := base.MakeDocKey(f.col.Description(), string(f.doc.key)) + spans := core.NewSpans(core.NewSpan(targetKey, targetKey.PrefixEnd())) + err = f.docFetcher.Start(ctx, spans) + if err != nil { + return nil, ExecInfo{}, err + } + encDoc, execInfo, err := f.docFetcher.FetchNext(ctx) + if err != nil { + return nil, ExecInfo{}, err + } + err = f.docFetcher.Close() + if err != nil { + return nil, ExecInfo{}, err + } + f.execInfo.Add(execInfo) + if encDoc == nil { + continue + } + f.doc.MergeProperties(encDoc) + } else { + f.execInfo.DocsFetched++ + } + return f.doc, f.execInfo, nil + } +} + +func (f *IndexFetcher) Close() error { + if f.indexIter != nil { + return f.indexIter.Close() + } + return nil +} diff --git a/db/fetcher/indexer_iterators.go b/db/fetcher/indexer_iterators.go new file mode 100644 index 0000000000..b563c9b3a3 --- /dev/null +++ b/db/fetcher/indexer_iterators.go @@ -0,0 +1,464 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package fetcher + +import ( + "bytes" + "context" + "errors" + "strings" + + "github.com/fxamacker/cbor/v2" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/connor" + "github.com/sourcenetwork/defradb/core" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/planner/mapper" + + "github.com/ipfs/go-datastore/query" +) + +const ( + opEq = "_eq" + opGt = "_gt" + opGe = "_ge" + opLt = "_lt" + opLe = "_le" + opNe = "_ne" + opIn = "_in" + opNin = "_nin" + opLike = "_like" + opNlike = "_nlike" +) + +// indexIterator is an iterator over index keys. +// It is used to iterate over the index keys that match a specific condition. +// For example, iteration over condition _eq and _gt will have completely different logic. 
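+//
+// A typical lifecycle, as a sketch (store being any datastore.DSReaderWriter):
+//
+//	iter, err := createIndexIterator(indexKey, indexFilter, &execInfo)
+//	// handle err, then:
+//	err = iter.Init(ctx, store)
+//	for {
+//		key, hasValue, err := iter.Next()
+//		if err != nil || !hasValue {
+//			break
+//		}
+//		// process key.FieldValues
+//	}
+//	err = iter.Close()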
+type indexIterator interface { + Init(context.Context, datastore.DSReaderWriter) error + Next() (core.IndexDataStoreKey, bool, error) + Close() error +} + +type queryResultIterator struct { + resultIter query.Results +} + +func (i queryResultIterator) Next() (core.IndexDataStoreKey, bool, error) { + res, hasVal := i.resultIter.NextSync() + if res.Error != nil { + return core.IndexDataStoreKey{}, false, res.Error + } + if !hasVal { + return core.IndexDataStoreKey{}, false, nil + } + key, err := core.NewIndexDataStoreKey(res.Key) + if err != nil { + return core.IndexDataStoreKey{}, false, err + } + return key, true, nil +} + +func (i queryResultIterator) Close() error { + return i.resultIter.Close() +} + +type eqIndexIterator struct { + queryResultIterator + indexKey core.IndexDataStoreKey + filterVal []byte + execInfo *ExecInfo +} + +func (i *eqIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { + i.indexKey.FieldValues = [][]byte{i.filterVal} + resultIter, err := store.Query(ctx, query.Query{ + Prefix: i.indexKey.ToString(), + KeysOnly: true, + }) + if err != nil { + return err + } + i.resultIter = resultIter + return nil +} + +func (i *eqIndexIterator) Next() (core.IndexDataStoreKey, bool, error) { + key, hasValue, err := i.queryResultIterator.Next() + if hasValue { + i.execInfo.IndexesFetched++ + } + return key, hasValue, err +} + +type inIndexIterator struct { + eqIndexIterator + filterValues [][]byte + nextValIndex int + ctx context.Context + store datastore.DSReaderWriter + hasIterator bool +} + +func newInIndexIterator( + indexKey core.IndexDataStoreKey, + filterValues [][]byte, + execInfo *ExecInfo, +) *inIndexIterator { + return &inIndexIterator{ + eqIndexIterator: eqIndexIterator{ + indexKey: indexKey, + execInfo: execInfo, + }, + filterValues: filterValues, + } +} + +func (i *inIndexIterator) nextIterator() (bool, error) { + if i.nextValIndex > 0 { + err := i.eqIndexIterator.Close() + if err != nil { + return false, err + } + } + + if i.nextValIndex >= len(i.filterValues) { + return false, nil + } + + i.filterVal = i.filterValues[i.nextValIndex] + err := i.eqIndexIterator.Init(i.ctx, i.store) + if err != nil { + return false, err + } + i.nextValIndex++ + return true, nil +} + +func (i *inIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error { + i.ctx = ctx + i.store = store + var err error + i.hasIterator, err = i.nextIterator() + return err +} + +func (i *inIndexIterator) Next() (core.IndexDataStoreKey, bool, error) { + for i.hasIterator { + key, hasValue, err := i.eqIndexIterator.Next() + if err != nil { + return core.IndexDataStoreKey{}, false, err + } + if !hasValue { + i.hasIterator, err = i.nextIterator() + if err != nil { + return core.IndexDataStoreKey{}, false, err + } + continue + } + return key, true, nil + } + return core.IndexDataStoreKey{}, false, nil +} + +func (i *inIndexIterator) Close() error { + return nil +} + +type errorCheckingFilter struct { + matcher indexMatcher + err error +} + +func (f *errorCheckingFilter) Filter(e query.Entry) bool { + if f.err != nil { + return false + } + indexKey, err := core.NewIndexDataStoreKey(e.Key) + if err != nil { + f.err = err + return false + } + res, err := f.matcher.Match(indexKey) + if err != nil { + f.err = err + return false + } + return res +} + +// execInfoIndexMatcherDecorator is a decorator for indexMatcher that counts the number +// of indexes fetched on every call to Match. 
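+// This keeps the counting concern out of the concrete matchers: any
+// indexMatcher can be wrapped without touching its comparison logic.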
+type execInfoIndexMatcherDecorator struct {
+	matcher  indexMatcher
+	execInfo *ExecInfo
+}
+
+func (d *execInfoIndexMatcherDecorator) Match(key core.IndexDataStoreKey) (bool, error) {
+	d.execInfo.IndexesFetched++
+	return d.matcher.Match(key)
+}
+
+type scanningIndexIterator struct {
+	queryResultIterator
+	indexKey core.IndexDataStoreKey
+	matcher  indexMatcher
+	filter   errorCheckingFilter
+	execInfo *ExecInfo
+}
+
+func (i *scanningIndexIterator) Init(ctx context.Context, store datastore.DSReaderWriter) error {
+	i.filter.matcher = &execInfoIndexMatcherDecorator{matcher: i.matcher, execInfo: i.execInfo}
+
+	iter, err := store.Query(ctx, query.Query{
+		Prefix:   i.indexKey.ToString(),
+		KeysOnly: true,
+		Filters:  []query.Filter{&i.filter},
+	})
+	if err != nil {
+		return err
+	}
+	i.resultIter = iter
+
+	return nil
+}
+
+func (i *scanningIndexIterator) Next() (core.IndexDataStoreKey, bool, error) {
+	key, hasValue, err := i.queryResultIterator.Next()
+	if i.filter.err != nil {
+		return core.IndexDataStoreKey{}, false, i.filter.err
+	}
+	return key, hasValue, err
+}
+
+// indexMatcher checks if a stored index value satisfies a condition.
+type indexMatcher interface {
+	Match(core.IndexDataStoreKey) (bool, error)
+}
+
+// indexByteValuesMatcher is a filter that compares the index value with a given value.
+// It uses bytes.Compare to compare the values and evaluate the result with evalFunc.
+type indexByteValuesMatcher struct {
+	value []byte
+	// evalFunc receives a result of bytes.Compare
+	evalFunc func(int) bool
+}
+
+func (m *indexByteValuesMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
+	res := bytes.Compare(key.FieldValues[0], m.value)
+	return m.evalFunc(res), nil
+}
+
+// neIndexMatcher checks if the _ne condition is met.
+type neIndexMatcher struct {
+	value []byte
+}
+
+func (m *neIndexMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
+	return !bytes.Equal(key.FieldValues[0], m.value), nil
+}
+
+// indexInArrayMatcher checks if the index value is (or is not) in the given array.
+type indexInArrayMatcher struct {
+	values map[string]bool
+	isIn   bool
+}
+
+func newNinIndexCmp(values [][]byte, isIn bool) *indexInArrayMatcher {
+	valuesMap := make(map[string]bool)
+	for _, v := range values {
+		valuesMap[string(v)] = true
+	}
+	return &indexInArrayMatcher{values: valuesMap, isIn: isIn}
+}
+
+func (m *indexInArrayMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
+	_, found := m.values[string(key.FieldValues[0])]
+	return found == m.isIn, nil
+}
+
+// indexLikeMatcher checks if the index value satisfies the LIKE condition.
+type indexLikeMatcher struct {
+	hasPrefix   bool
+	hasSuffix   bool
+	startAndEnd []string
+	isLike      bool
+	value       string
+}
+
+func newLikeIndexCmp(filterValue string, isLike bool) *indexLikeMatcher {
+	matcher := &indexLikeMatcher{
+		isLike: isLike,
+	}
+	if len(filterValue) >= 2 {
+		if filterValue[0] == '%' {
+			matcher.hasPrefix = true
+			filterValue = strings.TrimPrefix(filterValue, "%")
+		}
+		if filterValue[len(filterValue)-1] == '%' {
+			matcher.hasSuffix = true
+			filterValue = strings.TrimSuffix(filterValue, "%")
+		}
+		if !matcher.hasPrefix && !matcher.hasSuffix {
+			matcher.startAndEnd = strings.Split(filterValue, "%")
+		}
+	}
+	matcher.value = filterValue
+
+	return matcher
+}
+
+func (m *indexLikeMatcher) Match(key core.IndexDataStoreKey) (bool, error) {
+	var currentVal string
+	err := cbor.Unmarshal(key.FieldValues[0], &currentVal)
+	if err != nil {
+		return false, err
+	}
+
+	return m.doesMatch(currentVal) == m.isLike, nil
+}
+
+func (m *indexLikeMatcher) doesMatch(currentVal string) bool {
+	switch {
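+	// note: hasPrefix records that the pattern started with '%' (so only a
+	// suffix of the value must match), and hasSuffix that it ended with '%'
+	// (a prefix match).
+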
case m.hasPrefix && m.hasSuffix: + return strings.Contains(currentVal, m.value) + case m.hasPrefix: + return strings.HasSuffix(currentVal, m.value) + case m.hasSuffix: + return strings.HasPrefix(currentVal, m.value) + // there might be 2 ends only for LIKE with 1 % in the middle "ab%cd" + case len(m.startAndEnd) == 2: + return strings.HasPrefix(currentVal, m.startAndEnd[0]) && + strings.HasSuffix(currentVal, m.startAndEnd[1]) + default: + return m.value == currentVal + } +} + +func createIndexIterator( + indexDataStoreKey core.IndexDataStoreKey, + indexFilterConditions *mapper.Filter, + execInfo *ExecInfo, +) (indexIterator, error) { + var op string + var filterVal any + for _, indexFilterCond := range indexFilterConditions.Conditions { + condMap := indexFilterCond.(map[connor.FilterKey]any) + var key connor.FilterKey + for key, filterVal = range condMap { + break + } + opKey := key.(*mapper.Operator) + op = opKey.Operation + break + } + + switch op { + case opEq, opGt, opGe, opLt, opLe, opNe: + writableValue := client.NewCBORValue(client.LWW_REGISTER, filterVal) + + valueBytes, err := writableValue.Bytes() + if err != nil { + return nil, err + } + + switch op { + case opEq: + return &eqIndexIterator{ + indexKey: indexDataStoreKey, + filterVal: valueBytes, + execInfo: execInfo, + }, nil + case opGt: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: &indexByteValuesMatcher{ + value: valueBytes, + evalFunc: func(res int) bool { return res > 0 }, + }, + execInfo: execInfo, + }, nil + case opGe: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: &indexByteValuesMatcher{ + value: valueBytes, + evalFunc: func(res int) bool { return res > 0 || res == 0 }, + }, + execInfo: execInfo, + }, nil + case opLt: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: &indexByteValuesMatcher{ + value: valueBytes, + evalFunc: func(res int) bool { return res < 0 }, + }, + execInfo: execInfo, + }, nil + case opLe: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: &indexByteValuesMatcher{ + value: valueBytes, + evalFunc: func(res int) bool { return res < 0 || res == 0 }, + }, + execInfo: execInfo, + }, nil + case opNe: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: &neIndexMatcher{ + value: valueBytes, + }, + execInfo: execInfo, + }, nil + } + case opIn, opNin: + inArr, ok := filterVal.([]any) + if !ok { + return nil, errors.New("invalid _in/_nin value") + } + valArr := make([][]byte, 0, len(inArr)) + for _, v := range inArr { + writableValue := client.NewCBORValue(client.LWW_REGISTER, v) + valueBytes, err := writableValue.Bytes() + if err != nil { + return nil, err + } + valArr = append(valArr, valueBytes) + } + if op == opIn { + return newInIndexIterator(indexDataStoreKey, valArr, execInfo), nil + } else { + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: newNinIndexCmp(valArr, false), + execInfo: execInfo, + }, nil + } + case opLike: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: newLikeIndexCmp(filterVal.(string), true), + execInfo: execInfo, + }, nil + case opNlike: + return &scanningIndexIterator{ + indexKey: indexDataStoreKey, + matcher: newLikeIndexCmp(filterVal.(string), false), + execInfo: execInfo, + }, nil + } + + return nil, errors.New("invalid index filter condition") +} diff --git a/db/fetcher/mocks/fetcher.go b/db/fetcher/mocks/fetcher.go index 12bb386024..1597b13b2e 100644 --- a/db/fetcher/mocks/fetcher.go +++ 
b/db/fetcher/mocks/fetcher.go @@ -133,142 +133,12 @@ func (_c *Fetcher_FetchNext_Call) RunAndReturn(run func(context.Context) (fetche return _c } -// FetchNextDecoded provides a mock function with given fields: ctx -func (_m *Fetcher) FetchNextDecoded(ctx context.Context) (*client.Document, fetcher.ExecInfo, error) { - ret := _m.Called(ctx) - - var r0 *client.Document - var r1 fetcher.ExecInfo - var r2 error - if rf, ok := ret.Get(0).(func(context.Context) (*client.Document, fetcher.ExecInfo, error)); ok { - return rf(ctx) - } - if rf, ok := ret.Get(0).(func(context.Context) *client.Document); ok { - r0 = rf(ctx) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*client.Document) - } - } - - if rf, ok := ret.Get(1).(func(context.Context) fetcher.ExecInfo); ok { - r1 = rf(ctx) - } else { - r1 = ret.Get(1).(fetcher.ExecInfo) - } - - if rf, ok := ret.Get(2).(func(context.Context) error); ok { - r2 = rf(ctx) - } else { - r2 = ret.Error(2) - } - - return r0, r1, r2 -} - -// Fetcher_FetchNextDecoded_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchNextDecoded' -type Fetcher_FetchNextDecoded_Call struct { - *mock.Call -} - -// FetchNextDecoded is a helper method to define mock.On call -// - ctx context.Context -func (_e *Fetcher_Expecter) FetchNextDecoded(ctx interface{}) *Fetcher_FetchNextDecoded_Call { - return &Fetcher_FetchNextDecoded_Call{Call: _e.mock.On("FetchNextDecoded", ctx)} -} - -func (_c *Fetcher_FetchNextDecoded_Call) Run(run func(ctx context.Context)) *Fetcher_FetchNextDecoded_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context)) - }) - return _c -} - -func (_c *Fetcher_FetchNextDecoded_Call) Return(_a0 *client.Document, _a1 fetcher.ExecInfo, _a2 error) *Fetcher_FetchNextDecoded_Call { - _c.Call.Return(_a0, _a1, _a2) - return _c -} - -func (_c *Fetcher_FetchNextDecoded_Call) RunAndReturn(run func(context.Context) (*client.Document, fetcher.ExecInfo, error)) *Fetcher_FetchNextDecoded_Call { - _c.Call.Return(run) - return _c -} - -// FetchNextDoc provides a mock function with given fields: ctx, mapping -func (_m *Fetcher) FetchNextDoc(ctx context.Context, mapping *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error) { - ret := _m.Called(ctx, mapping) - - var r0 []byte - var r1 core.Doc - var r2 fetcher.ExecInfo - var r3 error - if rf, ok := ret.Get(0).(func(context.Context, *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error)); ok { - return rf(ctx, mapping) - } - if rf, ok := ret.Get(0).(func(context.Context, *core.DocumentMapping) []byte); ok { - r0 = rf(ctx, mapping) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]byte) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, *core.DocumentMapping) core.Doc); ok { - r1 = rf(ctx, mapping) - } else { - r1 = ret.Get(1).(core.Doc) - } - - if rf, ok := ret.Get(2).(func(context.Context, *core.DocumentMapping) fetcher.ExecInfo); ok { - r2 = rf(ctx, mapping) - } else { - r2 = ret.Get(2).(fetcher.ExecInfo) - } - - if rf, ok := ret.Get(3).(func(context.Context, *core.DocumentMapping) error); ok { - r3 = rf(ctx, mapping) - } else { - r3 = ret.Error(3) - } - - return r0, r1, r2, r3 -} - -// Fetcher_FetchNextDoc_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'FetchNextDoc' -type Fetcher_FetchNextDoc_Call struct { - *mock.Call -} - -// FetchNextDoc is a helper method to define mock.On call -// - ctx context.Context -// - mapping *core.DocumentMapping -func (_e *Fetcher_Expecter) 
FetchNextDoc(ctx interface{}, mapping interface{}) *Fetcher_FetchNextDoc_Call { - return &Fetcher_FetchNextDoc_Call{Call: _e.mock.On("FetchNextDoc", ctx, mapping)} -} - -func (_c *Fetcher_FetchNextDoc_Call) Run(run func(ctx context.Context, mapping *core.DocumentMapping)) *Fetcher_FetchNextDoc_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(*core.DocumentMapping)) - }) - return _c -} - -func (_c *Fetcher_FetchNextDoc_Call) Return(_a0 []byte, _a1 core.Doc, _a2 fetcher.ExecInfo, _a3 error) *Fetcher_FetchNextDoc_Call { - _c.Call.Return(_a0, _a1, _a2, _a3) - return _c -} - -func (_c *Fetcher_FetchNextDoc_Call) RunAndReturn(run func(context.Context, *core.DocumentMapping) ([]byte, core.Doc, fetcher.ExecInfo, error)) *Fetcher_FetchNextDoc_Call { - _c.Call.Return(run) - return _c -} - // Init provides a mock function with given fields: ctx, txn, col, fields, filter, docmapper, reverse, showDeleted -func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col *client.CollectionDescription, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { +func (_m *Fetcher) Init(ctx context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool) error { ret := _m.Called(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) var r0 error - if rf, ok := ret.Get(0).(func(context.Context, datastore.Txn, *client.CollectionDescription, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, datastore.Txn, client.Collection, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error); ok { r0 = rf(ctx, txn, col, fields, filter, docmapper, reverse, showDeleted) } else { r0 = ret.Error(0) @@ -285,7 +155,7 @@ type Fetcher_Init_Call struct { // Init is a helper method to define mock.On call // - ctx context.Context // - txn datastore.Txn -// - col *client.CollectionDescription +// - col client.Collection // - fields []client.FieldDescription // - filter *mapper.Filter // - docmapper *core.DocumentMapping @@ -295,9 +165,9 @@ func (_e *Fetcher_Expecter) Init(ctx interface{}, txn interface{}, col interface return &Fetcher_Init_Call{Call: _e.mock.On("Init", ctx, txn, col, fields, filter, docmapper, reverse, showDeleted)} } -func (_c *Fetcher_Init_Call) Run(run func(ctx context.Context, txn datastore.Txn, col *client.CollectionDescription, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call { +func (_c *Fetcher_Init_Call) Run(run func(ctx context.Context, txn datastore.Txn, col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, reverse bool, showDeleted bool)) *Fetcher_Init_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(datastore.Txn), args[2].(*client.CollectionDescription), args[3].([]client.FieldDescription), args[4].(*mapper.Filter), args[5].(*core.DocumentMapping), args[6].(bool), args[7].(bool)) + run(args[0].(context.Context), args[1].(datastore.Txn), args[2].(client.Collection), args[3].([]client.FieldDescription), args[4].(*mapper.Filter), args[5].(*core.DocumentMapping), args[6].(bool), args[7].(bool)) }) return _c } @@ -307,7 +177,7 @@ func (_c *Fetcher_Init_Call) 
Return(_a0 error) *Fetcher_Init_Call { return _c } -func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, datastore.Txn, *client.CollectionDescription, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call { +func (_c *Fetcher_Init_Call) RunAndReturn(run func(context.Context, datastore.Txn, client.Collection, []client.FieldDescription, *mapper.Filter, *core.DocumentMapping, bool, bool) error) *Fetcher_Init_Call { _c.Call.Return(run) return _c } diff --git a/db/fetcher/mocks/utils.go b/db/fetcher/mocks/utils.go index 3ffe12fce2..298d5b2ad6 100644 --- a/db/fetcher/mocks/utils.go +++ b/db/fetcher/mocks/utils.go @@ -13,9 +13,6 @@ package mocks import ( "testing" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/stretchr/testify/mock" ) @@ -33,9 +30,6 @@ func NewStubbedFetcher(t *testing.T) *Fetcher { ).Maybe().Return(nil) f.EXPECT().Start(mock.Anything, mock.Anything).Maybe().Return(nil) f.EXPECT().FetchNext(mock.Anything).Maybe().Return(nil, nil) - f.EXPECT().FetchNextDoc(mock.Anything, mock.Anything).Maybe(). - Return(NewEncodedDocument(t), core.Doc{}, nil) - f.EXPECT().FetchNextDecoded(mock.Anything).Maybe().Return(&client.Document{}, nil) f.EXPECT().Close().Maybe().Return(nil) return f } diff --git a/db/fetcher/versioned.go b/db/fetcher/versioned.go index f1c7b6a9de..4ab8ef54a7 100644 --- a/db/fetcher/versioned.go +++ b/db/fetcher/versioned.go @@ -72,7 +72,7 @@ var ( // Future optimizations: // - Incremental checkpoint/snapshotting // - Reverse traversal (starting from the current state, and working backwards) -// - Create a efficient memory store for in-order traversal (BTree, etc) +// - Create an efficient memory store for in-order traversal (BTree, etc) // // Note: Should we transition this state traversal into the CRDT objects themselves, and not // within a new fetcher? @@ -92,7 +92,7 @@ type VersionedFetcher struct { queuedCids *list.List - col *client.CollectionDescription + col client.Collection // @todo index *client.IndexDescription mCRDTs map[uint32]crdt.MerkleCRDT } @@ -101,7 +101,7 @@ type VersionedFetcher struct { func (vf *VersionedFetcher) Init( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, @@ -240,7 +240,7 @@ func (vf *VersionedFetcher) seekTo(c cid.Cid) error { } } - // we now have all the the required state stored + // we now have all the required state stored // in our transient local Version_Index, we now need to // transfer it to the Primary_Index. @@ -258,7 +258,7 @@ func (vf *VersionedFetcher) seekTo(c cid.Cid) error { func (vf *VersionedFetcher) seekNext(c cid.Cid, topParent bool) error { // check if cid block exists in the global store, handle err - // @todo: Find an effecient way to determine if a CID is a member of a + // @todo: Find an efficient way to determine if a CID is a member of a // DocKey State graph // @body: We could possibly append the DocKey to the CID either as a // child key, or an instance on the CID key. @@ -324,7 +324,7 @@ func (vf *VersionedFetcher) seekNext(c cid.Cid, topParent bool) error { } // merge in the state of the IPLD Block identified by CID c into the VersionedFetcher state. -// Requires the CID to already exists in the DAGStore. +// Requires the CID to already exist in the DAGStore. // This function only works for merging Composite MerkleCRDT objects. 
// // First it checks for the existence of the block, @@ -357,13 +357,14 @@ func (vf *VersionedFetcher) merge(c cid.Cid) error { return err } - fieldID := vf.col.Schema.GetFieldKey(l.Name) - if fieldID == uint32(0) { + schema := vf.col.Schema() + field, ok := vf.col.Description().GetFieldByName(l.Name, &schema) + if !ok { return client.NewErrFieldNotExist(l.Name) } // @todo: Right now we ONLY handle LWW_REGISTER, need to swith on this and // get CType from descriptions - if err := vf.processNode(fieldID, subNd, client.LWW_REGISTER, l.Name); err != nil { + if err := vf.processNode(uint32(field.ID), subNd, client.LWW_REGISTER, l.Name); err != nil { return err } } @@ -380,7 +381,7 @@ func (vf *VersionedFetcher) processNode( // handle CompositeDAG mcrdt, exists := vf.mCRDTs[crdtIndex] if !exists { - key, err := base.MakePrimaryIndexKeyForCRDT(*vf.col, ctype, vf.key, fieldName) + key, err := base.MakePrimaryIndexKeyForCRDT(vf.col.Description(), vf.col.Schema(), ctype, vf.key, fieldName) if err != nil { return err } @@ -404,7 +405,7 @@ func (vf *VersionedFetcher) processNode( return err } - _, err = mcrdt.Clock().ProcessNode(vf.ctx, nil, delta, nd) + err = mcrdt.Clock().ProcessNode(vf.ctx, delta, nd) return err } diff --git a/db/fetcher_test.go b/db/fetcher_test.go index e2c3647792..f7de9bf036 100644 --- a/db/fetcher_test.go +++ b/db/fetcher_test.go @@ -16,169 +16,13 @@ import ( "github.com/stretchr/testify/assert" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/db/fetcher" ) -func newTestCollectionDescription() client.CollectionDescription { - return client.CollectionDescription{ - Name: "users", - ID: uint32(1), - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - ID: client.FieldID(1), - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - ID: client.FieldID(2), - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: "Age", - ID: client.FieldID(3), - Kind: client.FieldKind_INT, - Typ: client.LWW_REGISTER, - }, - }, - }, - } -} - -func newTestFetcher(ctx context.Context, txn datastore.Txn) (*fetcher.DocumentFetcher, error) { - df := new(fetcher.DocumentFetcher) - desc := newTestCollectionDescription() - err := df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) - if err != nil { - return nil, err - } - return df, nil -} - -func TestFetcherInit(t *testing.T) { - _, err := newTestFetcher(context.Background(), nil) - assert.NoError(t, err) -} - -func TestFetcherStart(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - if err != nil { - t.Error(err) - return - } - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } - df, err := newTestFetcher(ctx, txn) - assert.NoError(t, err) - - err = df.Start(ctx, core.Spans{}) - assert.NoError(t, err) -} - func TestFetcherStartWithoutInit(t *testing.T) { ctx := context.Background() df := new(fetcher.DocumentFetcher) err := df.Start(ctx, core.Spans{}) assert.Error(t, err) } - -func TestMakeIndexPrefixKey(t *testing.T) { - desc := newTestCollectionDescription() - key := base.MakeCollectionKey(desc) - assert.Equal(t, "/1", key.ToString()) -} - -func TestFetcherGetAllPrimaryIndexEncodedDocSingle(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, 
err) - - doc, err := client.NewDocFromJSON([]byte(`{ - "Name": "John", - "Age": 21 - }`)) - assert.NoError(t, err) - err = col.Save(ctx, doc) - assert.NoError(t, err) - - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } - - // db.printDebugDB() - - df := new(fetcher.DocumentFetcher) - desc := col.Description() - err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) - assert.NoError(t, err) - - err = df.Start(ctx, core.Spans{}) - assert.NoError(t, err) - - encdoc, _, err := df.FetchNext(ctx) - assert.NoError(t, err) - assert.NotNil(t, encdoc) -} - -func TestFetcherGetAllPrimaryIndexEncodedDocMultiple(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - assert.NoError(t, err) - - col, err := newTestCollectionWithSchema(t, ctx, db) - assert.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{ - "Name": "John", - "Age": 21 - }`)) - assert.NoError(t, err) - err = col.Save(ctx, doc) - assert.NoError(t, err) - - doc, err = client.NewDocFromJSON([]byte(`{ - "Name": "Alice", - "Age": 27 - }`)) - assert.NoError(t, err) - err = col.Save(ctx, doc) - assert.NoError(t, err) - - txn, err := db.NewTxn(ctx, true) - if err != nil { - t.Error(err) - return - } - - // db.printDebugDB() - - df := new(fetcher.DocumentFetcher) - desc := col.Description() - err = df.Init(ctx, txn, &desc, desc.Schema.Fields, nil, nil, false, false) - assert.NoError(t, err) - - err = df.Start(ctx, core.Spans{}) - assert.NoError(t, err) - - encdoc, _, err := df.FetchNext(ctx) - assert.NoError(t, err) - assert.NotNil(t, encdoc) - encdoc, _, err = df.FetchNext(ctx) - assert.NoError(t, err) - assert.NotNil(t, encdoc) -} diff --git a/db/index.go b/db/index.go index 2c5ea2d6b2..ce9e55f519 100644 --- a/db/index.go +++ b/db/index.go @@ -14,10 +14,6 @@ import ( "context" "time" - ds "github.com/ipfs/go-datastore" - - "github.com/ipfs/go-datastore/query" - "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" @@ -47,7 +43,7 @@ func canConvertIndexFieldValue[T any](val any) bool { func getValidateIndexFieldFunc(kind client.FieldKind) func(any) bool { switch kind { - case client.FieldKind_STRING: + case client.FieldKind_STRING, client.FieldKind_FOREIGN_OBJECT: return canConvertIndexFieldValue[string] case client.FieldKind_INT: return canConvertIndexFieldValue[int64] @@ -86,10 +82,8 @@ func NewCollectionIndex( return nil, NewErrIndexDescHasNoFields(desc) } index := &collectionSimpleIndex{collection: collection, desc: desc} - schema := collection.Description().Schema - fieldID := client.FieldID(schema.GetFieldKey(desc.Fields[0].Name)) - field, foundField := collection.Description().GetFieldByID(fieldID) - if fieldID == client.FieldID(0) || !foundField { + field, foundField := collection.Schema().GetField(desc.Fields[0].Name) + if !foundField { return nil, NewErrIndexDescHasNonExistingField(desc, desc.Fields[0].Name) } var e error @@ -172,36 +166,18 @@ func (i *collectionSimpleIndex) Update( if err != nil { return err } - err = txn.Datastore().Delete(ctx, key.ToDS()) + exists, err := txn.Datastore().Has(ctx, key.ToDS()) if err != nil { return err } - return i.Save(ctx, txn, newDoc) -} - -func fetchKeysForPrefix( - ctx context.Context, - prefix string, - storage ds.Read, -) ([]ds.Key, error) { - q, err := storage.Query(ctx, query.Query{Prefix: prefix}) - if err != nil { - return nil, err - } - - keys := make([]ds.Key, 0) - for res := range q.Next() { - if res.Error != nil { - _ = q.Close() 
- return nil, res.Error - } - keys = append(keys, ds.NewKey(res.Key)) + if !exists { + return NewErrCorruptedIndex(i.desc.Name) } - if err = q.Close(); err != nil { - return nil, err + err = txn.Datastore().Delete(ctx, key.ToDS()) + if err != nil { + return err } - - return keys, nil + return i.Save(ctx, txn, newDoc) } // RemoveAll remove all artifacts of the index from the storage, i.e. all index @@ -211,7 +187,7 @@ func (i *collectionSimpleIndex) RemoveAll(ctx context.Context, txn datastore.Txn prefixKey.CollectionID = i.collection.ID() prefixKey.IndexID = i.desc.ID - keys, err := fetchKeysForPrefix(ctx, prefixKey.ToString(), txn.Datastore()) + keys, err := datastore.FetchKeysForPrefix(ctx, prefixKey.ToString(), txn.Datastore()) if err != nil { return err } diff --git a/db/index_test.go b/db/index_test.go index dce7e65bb4..e5682b551c 100644 --- a/db/index_test.go +++ b/db/index_test.go @@ -15,6 +15,7 @@ import ( "encoding/binary" "encoding/json" "fmt" + "strings" "testing" ds "github.com/ipfs/go-datastore" @@ -47,79 +48,68 @@ const ( testUsersColIndexName = "user_name" testUsersColIndexAge = "user_age" testUsersColIndexWeight = "user_weight" - - userColVersionID = "bafkreiefzlx2xsfaxixs24hcqwwqpa3nuqbutkapasymk3d5v4fxa4rlhy" ) type indexTestFixture struct { ctx context.Context db *implicitTxnDB txn datastore.Txn - users *collection + users client.Collection t *testing.T } -func getUsersCollectionDesc() client.CollectionDescription { - return client.CollectionDescription{ - Name: usersColName, - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: usersNameFieldName, - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: usersAgeFieldName, - Kind: client.FieldKind_INT, - Typ: client.LWW_REGISTER, - }, - { - Name: usersWeightFieldName, - Kind: client.FieldKind_FLOAT, - Typ: client.LWW_REGISTER, - }, - }, - }, - } -} +func (f *indexTestFixture) getUsersCollectionDesc() client.Collection { + _, err := f.db.AddSchema( + f.ctx, + fmt.Sprintf( + `type %s { + %s: String + %s: Int + %s: Float + }`, + usersColName, + usersNameFieldName, + usersAgeFieldName, + usersWeightFieldName, + ), + ) + require.NoError(f.t, err) -func getProductsCollectionDesc() client.CollectionDescription { - return client.CollectionDescription{ - Name: productsColName, - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: productsIDFieldName, - Kind: client.FieldKind_INT, - Typ: client.LWW_REGISTER, - }, - { - Name: productsPriceFieldName, - Kind: client.FieldKind_FLOAT, - Typ: client.LWW_REGISTER, - }, - { - Name: productsCategoryFieldName, - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - { - Name: productsAvailableFieldName, - Kind: client.FieldKind_BOOL, - Typ: client.LWW_REGISTER, - }, - }, - }, - } + col, err := f.db.GetCollectionByName(f.ctx, usersColName) + require.NoError(f.t, err) + + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) + + return col +} + +func (f *indexTestFixture) getProductsCollectionDesc() client.Collection { + _, err := f.db.AddSchema( + f.ctx, + fmt.Sprintf( + `type %s { + %s: Int + %s: Float + %s: String + %s: Boolean + }`, + productsColName, + productsIDFieldName, + productsPriceFieldName, + productsCategoryFieldName, + productsAvailableFieldName, + ), + ) + require.NoError(f.t, err) + + col, err := f.db.GetCollectionByName(f.ctx, productsColName) + 
require.NoError(f.t, err) + + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) + + return col } func newIndexTestFixtureBare(t *testing.T) *indexTestFixture { @@ -139,7 +129,7 @@ func newIndexTestFixtureBare(t *testing.T) *indexTestFixture { func newIndexTestFixture(t *testing.T) *indexTestFixture { f := newIndexTestFixtureBare(t) - f.users = f.createCollection(getUsersCollectionDesc()) + f.users = f.getUsersCollectionDesc() return f } @@ -247,18 +237,6 @@ func (f *indexTestFixture) getCollectionIndexes(colName string) ([]client.IndexD return f.db.fetchCollectionIndexDescriptions(f.ctx, f.txn, colName) } -func (f *indexTestFixture) createCollection( - desc client.CollectionDescription, -) *collection { - col, err := f.db.createCollection(f.ctx, f.txn, desc) - assert.NoError(f.t, err) - err = f.txn.Commit(f.ctx) - assert.NoError(f.t, err) - f.txn, err = f.db.NewTxn(f.ctx, false) - assert.NoError(f.t, err) - return col.(*collection) -} - func TestCreateIndex_IfFieldsIsEmpty_ReturnError(t *testing.T) { f := newIndexTestFixture(t) @@ -324,28 +302,6 @@ func TestCreateIndex_IfFieldHasNoDirection_DefaultToAsc(t *testing.T) { assert.Equal(t, client.Ascending, newDesc.Fields[0].Direction) } -func TestCreateIndex_IfNameIsNotSpecified_Generate(t *testing.T) { - f := newIndexTestFixtureBare(t) - colDesc := getUsersCollectionDesc() - const colName = "UsErS" - const fieldName = "NaMe" - colDesc.Name = colName - colDesc.Schema.Name = colName // Which one should we use? - colDesc.Schema.Fields[1].Name = fieldName - f.users = f.createCollection(colDesc) - - desc := client.IndexDescription{ - Name: "", - Fields: []client.IndexedFieldDescription{ - {Name: fieldName, Direction: client.Ascending}, - }, - } - - newDesc, err := f.createCollectionIndex(desc) - assert.NoError(t, err) - assert.Equal(t, colName+"_"+fieldName+"_ASC", newDesc.Name) -} - func TestCreateIndex_IfSingleFieldInDescOrder_ReturnError(t *testing.T) { f := newIndexTestFixture(t) @@ -422,74 +378,6 @@ func TestCreateIndex_ShouldSaveToSystemStorage(t *testing.T) { assert.Equal(t, desc, deserialized) } -func TestCreateIndex_IfStorageFails_ReturnError(t *testing.T) { - testErr := errors.New("test error") - - testCases := []struct { - Name string - ExpectedError error - GetMockSystemstore func(t *testing.T) *mocks.DSReaderWriter - AlterDescription func(desc *client.IndexDescription) - }{ - { - Name: "call Has() for custom index name", - ExpectedError: testErr, - GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { - store := mocks.NewDSReaderWriter(t) - store.EXPECT().Has(mock.Anything, mock.Anything).Unset() - store.EXPECT().Has(mock.Anything, mock.Anything).Return(false, testErr) - return store - }, - AlterDescription: func(desc *client.IndexDescription) {}, - }, - { - Name: "call Has() for generated index name", - ExpectedError: testErr, - GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { - store := mocks.NewDSReaderWriter(t) - store.EXPECT().Has(mock.Anything, mock.Anything).Unset() - store.EXPECT().Has(mock.Anything, mock.Anything).Return(false, testErr) - return store - }, - AlterDescription: func(desc *client.IndexDescription) { - desc.Name = "" - }, - }, - { - Name: "fails to store index description", - ExpectedError: NewErrInvalidStoredIndex(nil), - GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { - store := mocks.NewDSReaderWriter(t) - store.EXPECT().Put(mock.Anything, mock.Anything, mock.Anything).Unset() - key := core.NewCollectionIndexKey(usersColName, testUsersColIndexName) - 
store.EXPECT().Put(mock.Anything, key.ToDS(), mock.Anything).Return(testErr) - return store - }, - AlterDescription: func(desc *client.IndexDescription) {}, - }, - } - - for _, testCase := range testCases { - f := newIndexTestFixture(t) - - mockedTxn := f.mockTxn() - - mockedTxn.MockSystemstore = testCase.GetMockSystemstore(t) - f.stubSystemStore(mockedTxn.MockSystemstore.EXPECT()) - mockedTxn.EXPECT().Systemstore().Unset() - mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() - - desc := client.IndexDescription{ - Name: testUsersColIndexName, - Fields: []client.IndexedFieldDescription{{Name: usersNameFieldName}}, - } - testCase.AlterDescription(&desc) - - _, err := f.createCollectionIndex(desc) - assert.ErrorIs(t, err, testErr, testCase.Name) - } -} - func TestCreateIndex_IfCollectionDoesntExist_ReturnError(t *testing.T) { f := newIndexTestFixture(t) @@ -515,8 +403,8 @@ func TestCreateIndex_IfPropertyDoesntExist_ReturnError(t *testing.T) { func TestCreateIndex_WithMultipleCollectionsAndIndexes_AssignIncrementedIDPerCollection(t *testing.T) { f := newIndexTestFixtureBare(t) - users := f.createCollection(getUsersCollectionDesc()) - products := f.createCollection(getProductsCollectionDesc()) + users := f.getUsersCollectionDesc() + products := f.getProductsCollectionDesc() makeIndex := func(fieldName string) client.IndexDescription { return client.IndexDescription{ @@ -580,50 +468,21 @@ func TestCreateIndex_ShouldUpdateCollectionsDescription(t *testing.T) { f.users.Description().Indexes) } -func TestCreateIndex_NewCollectionDescription_ShouldIncludeIndexDescription(t *testing.T) { - f := newIndexTestFixture(t) - - _, err := f.createCollectionIndex(getUsersIndexDescOnName()) - require.NoError(t, err) - - desc := getUsersIndexDescOnAge() - desc.Name = "" - _, err = f.createCollectionIndex(desc) - require.NoError(t, err) - - cols, err := f.db.getAllCollections(f.ctx, f.txn) - require.NoError(t, err) - - require.Equal(t, 1, len(cols)) - col := cols[0] - require.Equal(t, 2, len(col.Description().Indexes)) - require.NotEmpty(t, col.Description().Indexes[0].Name) - require.NotEmpty(t, col.Description().Indexes[1].Name) -} - func TestCreateIndex_IfAttemptToIndexOnUnsupportedType_ReturnError(t *testing.T) { f := newIndexTestFixtureBare(t) const unsupportedKind = client.FieldKind_BOOL_ARRAY - desc := client.CollectionDescription{ - Name: "testTypeCol", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "field", - Kind: unsupportedKind, - Typ: client.LWW_REGISTER, - }, - }, - }, - } + _, err := f.db.AddSchema( + f.ctx, + `type testTypeCol { + field: [Boolean!] 
+ }`, + ) + require.NoError(f.t, err) - collection := f.createCollection(desc) + collection, err := f.db.GetCollectionByName(f.ctx, "testTypeCol") + require.NoError(f.t, err) indexDesc := client.IndexDescription{ Fields: []client.IndexedFieldDescription{ @@ -631,41 +490,14 @@ func TestCreateIndex_IfAttemptToIndexOnUnsupportedType_ReturnError(t *testing.T) }, } - _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) + + _, err = f.createCollectionIndexFor(collection.Name(), indexDesc) require.ErrorIs(f.t, err, NewErrUnsupportedIndexFieldType(unsupportedKind)) f.commitTxn() } -func TestCreateIndex_IfFailedToReadIndexUponRetrievingCollectionDesc_ReturnError(t *testing.T) { - f := newIndexTestFixture(t) - - testErr := errors.New("test error") - - mockedTxn := f.mockTxn().ClearSystemStore() - onSystemStore := mockedTxn.MockSystemstore.EXPECT() - - colIndexKey := core.NewCollectionIndexKey(f.users.Description().Name, "") - matchPrefixFunc := func(q query.Query) bool { - res := q.Prefix == colIndexKey.ToDS().String() - return res - } - - onSystemStore.Query(mock.Anything, mock.MatchedBy(matchPrefixFunc)).Return(nil, testErr) - - descData, err := json.Marshal(getUsersCollectionDesc()) - require.NoError(t, err) - - onSystemStore.Query(mock.Anything, mock.Anything). - Return(mocks.NewQueryResultsWithValues(t, []byte("schemaID")), nil) - onSystemStore.Get(mock.Anything, mock.Anything).Unset() - onSystemStore.Get(mock.Anything, mock.Anything).Return(descData, nil) - - f.stubSystemStore(onSystemStore) - - _, err = f.db.getAllCollections(f.ctx, f.txn) - require.ErrorIs(t, err, testErr) -} - func TestGetIndexes_ShouldReturnListOfAllExistingIndexes(t *testing.T) { f := newIndexTestFixture(t) @@ -676,7 +508,9 @@ func TestGetIndexes_ShouldReturnListOfAllExistingIndexes(t *testing.T) { _, err := f.createCollectionIndexFor(usersColName, usersIndexDesc) assert.NoError(t, err) - f.createCollection(getProductsCollectionDesc()) + f.commitTxn() + + f.getProductsCollectionDesc() productsIndexDesc := client.IndexDescription{ Name: "products_description_index", Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, @@ -703,7 +537,7 @@ func TestGetIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { assert.NoError(t, err) _, err = f.getAllIndexes() - assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) + assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } func TestGetIndexes_IfInvalidIndexKeyIsStored_ReturnError(t *testing.T) { @@ -728,14 +562,15 @@ func TestGetIndexes_IfInvalidIndexKeyIsStored_ReturnError(t *testing.T) { func TestGetIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { f := newIndexTestFixture(t) + testErr := errors.New("test error") + mockedTxn := f.mockTxn() mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Unset() - mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). 
- Return(nil, errors.New("test error")) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) _, err := f.getAllIndexes() - assert.ErrorIs(t, err, NewErrFailedToCreateCollectionQuery(nil)) + assert.ErrorIs(t, err, testErr) } func TestGetIndexes_IfSystemStoreFails_ShouldCloseIterator(t *testing.T) { @@ -779,44 +614,7 @@ func TestGetIndexes_IfSystemStoreHasInvalidData_ReturnError(t *testing.T) { mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(q, nil) _, err := f.getAllIndexes() - assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) -} - -func TestGetIndexes_IfFailsToReadSeqNumber_ReturnError(t *testing.T) { - testErr := errors.New("test error") - - testCases := []struct { - Name string - StubSystemStore func(*mocks.DSReaderWriter_Expecter, core.Key) - }{ - { - Name: "Read Sequence Number", - StubSystemStore: func(onSystemStore *mocks.DSReaderWriter_Expecter, seqKey core.Key) { - onSystemStore.Get(mock.Anything, seqKey.ToDS()).Return(nil, testErr) - }, - }, - { - Name: "Increment Sequence Number", - StubSystemStore: func(onSystemStore *mocks.DSReaderWriter_Expecter, seqKey core.Key) { - onSystemStore.Put(mock.Anything, seqKey.ToDS(), mock.Anything).Return(testErr) - }, - }, - } - - for _, tc := range testCases { - f := newIndexTestFixture(t) - - mockedTxn := f.mockTxn() - onSystemStore := mockedTxn.MockSystemstore.EXPECT() - f.resetSystemStoreStubs(onSystemStore) - - seqKey := core.NewSequenceKey(fmt.Sprintf("%s/%d", core.COLLECTION_INDEX, f.users.ID())) - tc.StubSystemStore(onSystemStore, seqKey) - f.stubSystemStore(onSystemStore) - - _, err := f.createCollectionIndexFor(f.users.Name(), getUsersIndexDescOnName()) - assert.ErrorIs(t, err, testErr) - } + assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) { @@ -829,11 +627,17 @@ func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) _, err := f.createCollectionIndexFor(usersColName, usersIndexDesc) assert.NoError(t, err) - f.createCollection(getProductsCollectionDesc()) + f.commitTxn() + + f.getProductsCollectionDesc() productsIndexDesc := client.IndexDescription{ Name: "products_description_index", Fields: []client.IndexedFieldDescription{{Name: productsPriceFieldName}}, } + + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) + _, err = f.createCollectionIndexFor(productsColName, productsIndexDesc) assert.NoError(t, err) @@ -853,15 +657,16 @@ func TestGetCollectionIndexes_ShouldReturnListOfCollectionIndexes(t *testing.T) func TestGetCollectionIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { f := newIndexTestFixture(t) + testErr := errors.New("test error") + mockedTxn := f.mockTxn() mockedTxn.MockSystemstore = mocks.NewDSReaderWriter(t) - mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything). 
- Return(nil, errors.New("test error")) + mockedTxn.MockSystemstore.EXPECT().Query(mock.Anything, mock.Anything).Return(nil, testErr) mockedTxn.EXPECT().Systemstore().Unset() mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore) _, err := f.getCollectionIndexes(usersColName) - assert.ErrorIs(t, err, NewErrFailedToCreateCollectionQuery(nil)) + assert.ErrorIs(t, err, testErr) } func TestGetCollectionIndexes_IfSystemStoreFails_ShouldCloseIterator(t *testing.T) { @@ -902,7 +707,7 @@ func TestGetCollectionIndexes_IfInvalidIndexIsStored_ReturnError(t *testing.T) { assert.NoError(t, err) _, err = f.getCollectionIndexes(usersColName) - assert.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) + assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } func TestCollectionGetIndexes_ShouldReturnIndexes(t *testing.T) { @@ -967,7 +772,7 @@ func TestCollectionGetIndexes_IfSystemStoreFails_ReturnError(t *testing.T) { }, { Name: "Query iterator returns invalid value", - ExpectedError: NewErrInvalidStoredIndex(nil), + ExpectedError: datastore.NewErrInvalidStoredValue(nil), GetMockSystemstore: func(t *testing.T) *mocks.DSReaderWriter { store := mocks.NewDSReaderWriter(t) store.EXPECT().Query(mock.Anything, mock.Anything). @@ -1019,27 +824,23 @@ func TestCollectionGetIndexes_IfFailsToCreateTxn_ShouldNotCache(t *testing.T) { func TestCollectionGetIndexes_IfStoredIndexWithUnsupportedType_ReturnError(t *testing.T) { f := newIndexTestFixtureBare(t) + f.getUsersCollectionDesc() const unsupportedKind = client.FieldKind_BOOL_ARRAY + _, err := f.db.AddSchema( + f.ctx, + `type testTypeCol { + name: String + field: [Boolean!] + }`, + ) + require.NoError(f.t, err) - desc := client.CollectionDescription{ - Name: "testTypeCol", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "field", - Kind: unsupportedKind, - Typ: client.LWW_REGISTER, - }, - }, - }, - } + collection, err := f.db.GetCollectionByName(f.ctx, "testTypeCol") + require.NoError(f.t, err) - collection := f.createCollection(desc) + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) indexDesc := client.IndexDescription{ Fields: []client.IndexedFieldDescription{ @@ -1119,17 +920,6 @@ func TestCollectionGetIndexes_IfIndexIsDropped_ReturnUpdateIndexes(t *testing.T) func TestCollectionGetIndexes_ShouldReturnIndexesInOrderedByName(t *testing.T) { f := newIndexTestFixtureBare(t) - colDesc := client.CollectionDescription{ - Name: "testCollection", - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - }, - }, - } const ( num = 30 fieldNamePrefix = "field_" @@ -1140,17 +930,33 @@ func TestCollectionGetIndexes_ShouldReturnIndexesInOrderedByName(t *testing.T) { return fmt.Sprintf("%02d", i) } + builder := strings.Builder{} + builder.WriteString("type testCollection {\n") + for i := 1; i <= num; i++ { - colDesc.Schema.Fields = append(colDesc.Schema.Fields, - client.FieldDescription{ - Name: fieldNamePrefix + toSuffix(i), - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }) + _, err := builder.WriteString(fieldNamePrefix) + require.NoError(f.t, err) + + _, err = builder.WriteString(toSuffix(i)) + require.NoError(f.t, err) + + _, err = builder.WriteString(": String\n") + require.NoError(f.t, err) } + _, err := builder.WriteString("}") + require.NoError(f.t, err) + + _, err = f.db.AddSchema( + f.ctx, + builder.String(), + ) + require.NoError(f.t, err) - 
collection := f.createCollection(colDesc) + collection, err := f.db.GetCollectionByName(f.ctx, "testCollection") + require.NoError(f.t, err) + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) for i := 1; i <= num; i++ { iStr := toSuffix(i) indexDesc := client.IndexDescription{ @@ -1189,8 +995,7 @@ func TestDropIndex_ShouldDeleteIndex(t *testing.T) { func TestDropIndex_IfStorageFails_ReturnError(t *testing.T) { f := newIndexTestFixture(t) desc := f.createUserCollectionIndexOnName() - - f.db.Close(f.ctx) + f.db.Close() err := f.dropIndex(productsColName, desc.Name) assert.Error(t, err) @@ -1203,21 +1008,6 @@ func TestDropIndex_IfCollectionDoesntExist_ReturnError(t *testing.T) { assert.ErrorIs(t, err, NewErrCanNotReadCollection(usersColName, nil)) } -func TestDropIndex_IfFailsToQuerySystemStorage_ReturnError(t *testing.T) { - f := newIndexTestFixture(t) - desc := f.createUserCollectionIndexOnName() - - testErr := errors.New("test error") - - mockTxn := f.mockTxn().ClearSystemStore() - systemStoreOn := mockTxn.MockSystemstore.EXPECT() - systemStoreOn.Query(mock.Anything, mock.Anything).Return(nil, testErr) - f.stubSystemStore(systemStoreOn) - - err := f.dropIndex(usersColName, desc.Name) - require.ErrorIs(t, err, testErr) -} - func TestDropIndex_IfFailsToCreateTxn_ReturnError(t *testing.T) { f := newIndexTestFixture(t) @@ -1317,7 +1107,7 @@ func TestDropAllIndexes_ShouldDeleteAllIndexes(t *testing.T) { assert.Equal(t, 2, f.countIndexPrefixes(usersColName, "")) - err = f.users.dropAllIndexes(f.ctx, f.txn) + err = f.users.(*collection).dropAllIndexes(f.ctx, f.txn) assert.NoError(t, err) assert.Equal(t, 0, f.countIndexPrefixes(usersColName, "")) @@ -1326,10 +1116,9 @@ func TestDropAllIndexes_ShouldDeleteAllIndexes(t *testing.T) { func TestDropAllIndexes_IfStorageFails_ReturnError(t *testing.T) { f := newIndexTestFixture(t) f.createUserCollectionIndexOnName() + f.db.Close() - f.db.Close(f.ctx) - - err := f.users.dropAllIndexes(f.ctx, f.txn) + err := f.users.(*collection).dropAllIndexes(f.ctx, f.txn) assert.Error(t, err) } @@ -1384,7 +1173,7 @@ func TestDropAllIndexes_IfSystemStorageFails_ReturnError(t *testing.T) { mockedTxn.EXPECT().Systemstore().Unset() mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() - err := f.users.dropAllIndexes(f.ctx, f.txn) + err := f.users.(*collection).dropAllIndexes(f.ctx, f.txn) assert.ErrorIs(t, err, testErr, testCase.Name) } } @@ -1404,7 +1193,7 @@ func TestDropAllIndexes_ShouldCloseQueryIterator(t *testing.T) { mockedTxn.EXPECT().Systemstore().Unset() mockedTxn.EXPECT().Systemstore().Return(mockedTxn.MockSystemstore).Maybe() - _ = f.users.dropAllIndexes(f.ctx, f.txn) + _ = f.users.(*collection).dropAllIndexes(f.ctx, f.txn) } func TestNewCollectionIndex_IfDescriptionHasNoFields_ReturnError(t *testing.T) { diff --git a/db/indexed_docs_test.go b/db/indexed_docs_test.go index b62cb992d6..6503429c96 100644 --- a/db/indexed_docs_test.go +++ b/db/indexed_docs_test.go @@ -230,19 +230,6 @@ func (f *indexTestFixture) stubSystemStore(systemStoreOn *mocks.DSReaderWriter_E systemStoreOn.Query(mock.Anything, mock.Anything).Maybe(). 
Return(mocks.NewQueryResultsWithValues(f.t), nil) - colKey := core.NewCollectionKey(usersColName) - systemStoreOn.Get(mock.Anything, colKey.ToDS()).Maybe().Return([]byte(userColVersionID), nil) - - colVersionIDKey := core.NewCollectionSchemaVersionKey(userColVersionID) - colDesc := getUsersCollectionDesc() - colDesc.ID = 1 - for i := range colDesc.Schema.Fields { - colDesc.Schema.Fields[i].ID = client.FieldID(i) - } - colDescBytes, err := json.Marshal(colDesc) - require.NoError(f.t, err) - systemStoreOn.Get(mock.Anything, colVersionIDKey.ToDS()).Maybe().Return(colDescBytes, nil) - colIndexOnNameKey := core.NewCollectionIndexKey(usersColName, testUsersColIndexName) systemStoreOn.Get(mock.Anything, colIndexOnNameKey.ToDS()).Maybe().Return(indexOnNameDescData, nil) @@ -325,7 +312,7 @@ func TestNonUnique_IfSystemStorageHasInvalidIndexDescription_Error(t *testing.T) Return(mocks.NewQueryResultsWithValues(t, []byte("invalid")), nil) err := f.users.WithTxn(mockTxn).Create(f.ctx, doc) - require.ErrorIs(t, err, NewErrInvalidStoredIndex(nil)) + assert.ErrorIs(t, err, datastore.NewErrInvalidStoredValue(nil)) } func TestNonUnique_IfSystemStorageFailsToReadIndexDesc_Error(t *testing.T) { @@ -361,8 +348,8 @@ func TestNonUnique_IfIndexIntField_StoreIt(t *testing.T) { func TestNonUnique_IfMultipleCollectionsWithIndexes_StoreIndexWithCollectionID(t *testing.T) { f := newIndexTestFixtureBare(t) - users := f.createCollection(getUsersCollectionDesc()) - products := f.createCollection(getProductsCollectionDesc()) + users := f.getUsersCollectionDesc() + products := f.getProductsCollectionDesc() _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) require.NoError(f.t, err) @@ -437,24 +424,23 @@ func TestNonUnique_StoringIndexedFieldValueOfDifferentTypes(t *testing.T) { } for i, tc := range testCase { - desc := client.CollectionDescription{ - Name: "testTypeCol" + strconv.Itoa(i), - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "field", - Kind: tc.FieldKind, - Typ: client.LWW_REGISTER, - }, - }, - }, - } + _, err := f.db.AddSchema( + f.ctx, + fmt.Sprintf( + `type %s { + field: %s + }`, + "testTypeCol"+strconv.Itoa(i), + tc.FieldKind.String(), + ), + ) + require.NoError(f.t, err) - collection := f.createCollection(desc) + collection, err := f.db.GetCollectionByName(f.ctx, "testTypeCol"+strconv.Itoa(i)) + require.NoError(f.t, err) + + f.txn, err = f.db.NewTxn(f.ctx, false) + require.NoError(f.t, err) indexDesc := client.IndexDescription{ Fields: []client.IndexedFieldDescription{ @@ -462,7 +448,7 @@ func TestNonUnique_StoringIndexedFieldValueOfDifferentTypes(t *testing.T) { }, } - _, err := f.createCollectionIndexFor(collection.Name(), indexDesc) + _, err = f.createCollectionIndexFor(collection.Name(), indexDesc) require.NoError(f.t, err) f.commitTxn() @@ -596,7 +582,7 @@ func TestNonUniqueCreate_IfUponIndexingExistingDocsFetcherFails_ReturnError(t *t doc := f.newUserDoc("John", 21) f.saveDocToCollection(doc, f.users) - f.users.fetcherFactory = tc.PrepareFetcher + f.users.(*collection).fetcherFactory = tc.PrepareFetcher key := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) @@ -614,7 +600,7 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) f.saveDocToCollection(doc, f.users) fieldKeyString := core.DataStoreKey{ - CollectionID: f.users.desc.IDString(), + CollectionID: 
f.users.Description().IDString(), }.WithDocKey(doc.Key().String()). WithFieldId("1"). WithValueFlag(). @@ -623,7 +609,7 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) invalidKeyString := fieldKeyString + "/doesn't matter/" // Insert an invalid key within the document prefix, this will generate an error within the fetcher. - f.users.db.multistore.Datastore().Put(f.ctx, ipfsDatastore.NewKey(invalidKeyString), []byte("doesn't matter")) + f.db.multistore.Datastore().Put(f.ctx, ipfsDatastore.NewKey(invalidKeyString), []byte("doesn't matter")) _, err := f.users.CreateIndex(f.ctx, getUsersIndexDescOnName()) require.ErrorIs(f.t, err, core.ErrInvalidKey) @@ -631,7 +617,7 @@ func TestNonUniqueCreate_IfDatastoreFailsToStoreIndex_ReturnError(t *testing.T) func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { f := newIndexTestFixtureBare(t) - users := f.createCollection(getUsersCollectionDesc()) + users := f.getUsersCollectionDesc() _, err := f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnName()) require.NoError(f.t, err) _, err = f.createCollectionIndexFor(users.Name(), getUsersIndexDescOnAge()) @@ -643,7 +629,7 @@ func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { f.saveDocToCollection(f.newUserDoc("John", 21), users) f.saveDocToCollection(f.newUserDoc("Islam", 23), users) - products := f.createCollection(getProductsCollectionDesc()) + products := f.getProductsCollectionDesc() _, err = f.createCollectionIndexFor(products.Name(), getProductsIndexDescOnCategory()) require.NoError(f.t, err) f.commitTxn() @@ -664,86 +650,6 @@ func TestNonUniqueDrop_ShouldDeleteStoredIndexedFields(t *testing.T) { assert.Len(t, f.getPrefixFromDataStore(prodCatKey.ToString()), 1) } -func TestNonUniqueDrop_IfDataStorageFails_ReturnError(t *testing.T) { - testErr := errors.New("test error") - - testCases := []struct { - description string - prepareSystemStorage func(*mocks.DSReaderWriter_Expecter) - }{ - { - description: "Fails to query data storage", - prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { - mockedDS.Query(mock.Anything, mock.Anything).Unset() - mockedDS.Query(mock.Anything, mock.Anything).Return(nil, testErr) - }, - }, - { - description: "Fails to iterate data storage", - prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { - mockedDS.Query(mock.Anything, mock.Anything).Unset() - q := mocks.NewQueryResultsWithResults(t, query.Result{Error: testErr}) - mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) - q.EXPECT().Close().Unset() - q.EXPECT().Close().Return(nil) - }, - }, - { - description: "Fails to delete from data storage", - prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { - q := mocks.NewQueryResultsWithResults(t, query.Result{Entry: query.Entry{Key: ""}}) - q.EXPECT().Close().Unset() - q.EXPECT().Close().Return(nil) - mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) - mockedDS.Delete(mock.Anything, mock.Anything).Unset() - mockedDS.Delete(mock.Anything, mock.Anything).Return(testErr) - }, - }, - { - description: "Fails to close data storage query iterator", - prepareSystemStorage: func(mockedDS *mocks.DSReaderWriter_Expecter) { - q := mocks.NewQueryResultsWithResults(t, query.Result{Entry: query.Entry{Key: ""}}) - q.EXPECT().Close().Unset() - q.EXPECT().Close().Return(testErr) - mockedDS.Query(mock.Anything, mock.Anything).Return(q, nil) - }, - }, - } - - for _, tc := range testCases { - f := newIndexTestFixture(t) - 
f.createUserCollectionIndexOnName() - - mockedTxn := f.mockTxn() - mockedTxn.MockDatastore = mocks.NewDSReaderWriter(t) - tc.prepareSystemStorage(mockedTxn.MockDatastore.EXPECT()) - mockedTxn.EXPECT().Datastore().Unset() - mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore) - - err := f.dropIndex(usersColName, testUsersColIndexName) - require.ErrorIs(t, err, testErr, tc.description) - } -} - -func TestNonUniqueDrop_ShouldCloseQueryIterator(t *testing.T) { - f := newIndexTestFixture(t) - f.createUserCollectionIndexOnName() - - mockedTxn := f.mockTxn() - - mockedTxn.MockDatastore = mocks.NewDSReaderWriter(f.t) - mockedTxn.EXPECT().Datastore().Unset() - mockedTxn.EXPECT().Datastore().Return(mockedTxn.MockDatastore).Maybe() - queryResults := mocks.NewQueryResultsWithValues(f.t) - queryResults.EXPECT().Close().Unset() - queryResults.EXPECT().Close().Return(nil) - mockedTxn.MockDatastore.EXPECT().Query(mock.Anything, mock.Anything). - Return(queryResults, nil) - - err := f.dropIndex(usersColName, testUsersColIndexName) - assert.NoError(t, err) -} - func TestNonUniqueUpdate_ShouldDeleteOldValueAndStoreNewOne(t *testing.T) { f := newIndexTestFixture(t) f.createUserCollectionIndexOnName() @@ -866,7 +772,7 @@ func TestNonUniqueUpdate_IfFetcherFails_ReturnError(t *testing.T) { PrepareFetcher: func() fetcher.Fetcher { f := fetcherMocks.NewStubbedFetcher(t) f.EXPECT().FetchNext(mock.Anything).Unset() - // By default the the stubbed fetcher returns an empty, invalid document + // By default the stubbed fetcher returns an empty, invalid document // here we need to make sure it reaches the Close call by overriding that default. f.EXPECT().FetchNext(mock.Anything).Maybe().Return(nil, fetcher.ExecInfo{}, nil) f.EXPECT().Close().Unset() @@ -885,7 +791,7 @@ func TestNonUniqueUpdate_IfFetcherFails_ReturnError(t *testing.T) { doc := f.newUserDoc("John", 21) f.saveDocToCollection(doc, f.users) - f.users.fetcherFactory = tc.PrepareFetcher + f.users.(*collection).fetcherFactory = tc.PrepareFetcher oldKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersNameFieldName).Doc(doc).Build() err := doc.Set(usersNameFieldName, "Islam") @@ -911,19 +817,14 @@ func TestNonUniqueUpdate_IfFailsToUpdateIndex_ReturnError(t *testing.T) { f.commitTxn() validKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc).Build() - invalidKey := newIndexKeyBuilder(f).Col(usersColName).Field(usersAgeFieldName).Doc(doc). - Values([]byte("invalid")).Build() - err := f.txn.Datastore().Delete(f.ctx, validKey.ToDS()) require.NoError(f.t, err) - err = f.txn.Datastore().Put(f.ctx, invalidKey.ToDS(), []byte{}) - require.NoError(f.t, err) f.commitTxn() err = doc.Set(usersAgeFieldName, 23) require.NoError(t, err) err = f.users.Update(f.ctx, doc) - require.Error(t, err) + require.ErrorIs(t, err, ErrCorruptedIndex) } func TestNonUniqueUpdate_ShouldPassToFetcherOnlyRelevantFields(t *testing.T) { @@ -931,14 +832,14 @@ func TestNonUniqueUpdate_ShouldPassToFetcherOnlyRelevantFields(t *testing.T) { f.createUserCollectionIndexOnName() f.createUserCollectionIndexOnAge() - f.users.fetcherFactory = func() fetcher.Fetcher { + f.users.(*collection).fetcherFactory = func() fetcher.Fetcher { f := fetcherMocks.NewStubbedFetcher(t) f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything).Unset() f.EXPECT().Init(mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything, mock.Anything). 
RunAndReturn(func( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, mapping *core.DocumentMapping, @@ -971,6 +872,7 @@ func TestNonUniqueUpdate_IfDatastoreFails_ReturnError(t *testing.T) { Name: "Delete old value", StubDataStore: func(ds *mocks.DSReaderWriter_Expecter) { ds.Delete(mock.Anything, mock.Anything).Return(testErr) + ds.Has(mock.Anything, mock.Anything).Maybe().Return(true, nil) ds.Get(mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) }, }, @@ -979,6 +881,7 @@ func TestNonUniqueUpdate_IfDatastoreFails_ReturnError(t *testing.T) { StubDataStore: func(ds *mocks.DSReaderWriter_Expecter) { ds.Delete(mock.Anything, mock.Anything).Maybe().Return(nil) ds.Get(mock.Anything, mock.Anything).Maybe().Return([]byte{}, nil) + ds.Has(mock.Anything, mock.Anything).Maybe().Return(true, nil) ds.Put(mock.Anything, mock.Anything, mock.Anything).Maybe().Return(testErr) }, }, @@ -999,7 +902,7 @@ func TestNonUniqueUpdate_IfDatastoreFails_ReturnError(t *testing.T) { schemaVersionID: f.users.Schema().VersionID, } - f.users.fetcherFactory = func() fetcher.Fetcher { + f.users.(*collection).fetcherFactory = func() fetcher.Fetcher { df := fetcherMocks.NewStubbedFetcher(t) df.EXPECT().FetchNext(mock.Anything).Unset() df.EXPECT().FetchNext(mock.Anything).Return(&encodedDoc, fetcher.ExecInfo{}, nil) diff --git a/db/p2p_collection.go b/db/p2p_collection.go deleted file mode 100644 index 02fc4139c2..0000000000 --- a/db/p2p_collection.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package db - -import ( - "context" - - dsq "github.com/ipfs/go-datastore/query" - - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" -) - -const marker = byte(0xff) - -// addP2PCollection adds the given collection ID that the P2P system -// subscribes to to the the persisted list. It will error if the provided -// collection ID is invalid. -func (db *db) addP2PCollection(ctx context.Context, txn datastore.Txn, collectionID string) error { - _, err := db.getCollectionBySchemaID(ctx, txn, collectionID) - if err != nil { - return NewErrAddingP2PCollection(err) - } - key := core.NewP2PCollectionKey(collectionID) - return txn.Systemstore().Put(ctx, key.ToDS(), []byte{marker}) -} - -// removeP2PCollection removes the given collection ID that the P2P system -// subscribes to from the the persisted list. It will error if the provided -// collection ID is invalid. -func (db *db) removeP2PCollection(ctx context.Context, txn datastore.Txn, collectionID string) error { - _, err := db.getCollectionBySchemaID(ctx, txn, collectionID) - if err != nil { - return NewErrRemovingP2PCollection(err) - } - key := core.NewP2PCollectionKey(collectionID) - return txn.Systemstore().Delete(ctx, key.ToDS()) -} - -// getAllP2PCollections returns the list of persisted collection IDs that -// the P2P system subscribes to. 
-func (db *db) getAllP2PCollections(ctx context.Context, txn datastore.Txn) ([]string, error) { - prefix := core.NewP2PCollectionKey("") - results, err := db.systemstore().Query(ctx, dsq.Query{ - Prefix: prefix.ToString(), - }) - if err != nil { - return nil, err - } - - collectionIDs := []string{} - for result := range results.Next() { - key, err := core.NewP2PCollectionKeyFromString(result.Key) - if err != nil { - return nil, err - } - collectionIDs = append(collectionIDs, key.CollectionID) - } - - return collectionIDs, nil -} diff --git a/db/p2p_collection_test.go b/db/p2p_collection_test.go deleted file mode 100644 index bdf54e3a38..0000000000 --- a/db/p2p_collection_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package db - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" -) - -func newTestCollection( - t *testing.T, - ctx context.Context, - db *implicitTxnDB, - name string, -) client.Collection { - desc := client.CollectionDescription{ - Name: name, - Schema: client.SchemaDescription{ - Fields: []client.FieldDescription{ - { - Name: "_key", - Kind: client.FieldKind_DocKey, - }, - { - Name: "Name", - Kind: client.FieldKind_STRING, - Typ: client.LWW_REGISTER, - }, - }, - }, - } - - txn, err := db.db.NewTxn(ctx, false) - require.NoError(t, err) - - col, err := db.db.createCollection(ctx, txn, desc) - require.NoError(t, err) - - err = txn.Commit(ctx) - require.NoError(t, err) - - return col -} - -func TestAddP2PCollection(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - - col := newTestCollection(t, ctx, db, "test") - - err = db.AddP2PCollection(ctx, col.SchemaID()) - require.NoError(t, err) -} - -func TestGetAllP2PCollection(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - - col1 := newTestCollection(t, ctx, db, "test1") - err = db.AddP2PCollection(ctx, col1.SchemaID()) - require.NoError(t, err) - - col2 := newTestCollection(t, ctx, db, "test2") - err = db.AddP2PCollection(ctx, col2.SchemaID()) - require.NoError(t, err) - - col3 := newTestCollection(t, ctx, db, "test3") - err = db.AddP2PCollection(ctx, col3.SchemaID()) - require.NoError(t, err) - - collections, err := db.GetAllP2PCollections(ctx) - require.NoError(t, err) - require.ElementsMatch(t, collections, []string{col1.SchemaID(), col2.SchemaID(), col3.SchemaID()}) -} - -func TestRemoveP2PCollection(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - - col1 := newTestCollection(t, ctx, db, "test1") - err = db.AddP2PCollection(ctx, col1.SchemaID()) - require.NoError(t, err) - - col2 := newTestCollection(t, ctx, db, "test2") - err = db.AddP2PCollection(ctx, col2.SchemaID()) - require.NoError(t, err) - - col3 := newTestCollection(t, ctx, db, "test3") - err = db.AddP2PCollection(ctx, col3.SchemaID()) - require.NoError(t, err) - - err = db.RemoveP2PCollection(ctx, col2.SchemaID()) - require.NoError(t, err) - - collections, err := db.GetAllP2PCollections(ctx) - 
require.NoError(t, err) - require.ElementsMatch(t, collections, []string{col1.SchemaID(), col3.SchemaID()}) -} diff --git a/db/replicator.go b/db/replicator.go deleted file mode 100644 index 84c94b9f5d..0000000000 --- a/db/replicator.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package db - -import ( - "context" - "encoding/json" - "errors" - - ds "github.com/ipfs/go-datastore" - dsq "github.com/ipfs/go-datastore/query" - "github.com/libp2p/go-libp2p/core/peer" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" -) - -// setReplicator adds a new replicator to the database. -func (db *db) setReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error { - existingRep, err := db.getReplicator(ctx, rep.Info) - if errors.Is(err, ds.ErrNotFound) { - return db.saveReplicator(ctx, txn, rep) - } - if err != nil { - return err - } - - newSchemas := []string{} - for _, newSchema := range rep.Schemas { - isNew := true - for _, existingSchema := range existingRep.Schemas { - if existingSchema == newSchema { - isNew = false - break - } - } - if isNew { - newSchemas = append(newSchemas, newSchema) - } - } - rep.Schemas = append(existingRep.Schemas, newSchemas...) - return db.saveReplicator(ctx, txn, rep) -} - -// deleteReplicator removes a replicator from the database. -func (db *db) deleteReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error { - if len(rep.Schemas) == 0 { - return db.deleteReplicatorKey(ctx, txn, rep.Info.ID) - } - return db.deleteSchemasForReplicator(ctx, txn, rep) -} - -func (db *db) deleteReplicatorKey(ctx context.Context, txn datastore.Txn, pid peer.ID) error { - key := core.NewReplicatorKey(pid.String()) - return txn.Systemstore().Delete(ctx, key.ToDS()) -} - -func (db *db) deleteSchemasForReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error { - existingRep, err := db.getReplicator(ctx, rep.Info) - if err != nil { - return err - } - - updatedSchemaList := []string{} - for _, s := range existingRep.Schemas { - found := false - for _, toDelete := range rep.Schemas { - if toDelete == s { - found = true - break - } - } - if !found { - updatedSchemaList = append(updatedSchemaList, s) - } - } - - if len(updatedSchemaList) == 0 { - return db.deleteReplicatorKey(ctx, txn, rep.Info.ID) - } - - existingRep.Schemas = updatedSchemaList - return db.saveReplicator(ctx, txn, existingRep) -} - -// GetAllReplicators returns all replicators of the database. 
-func (db *db) getAllReplicators(ctx context.Context, txn datastore.Txn) ([]client.Replicator, error) { - reps := []client.Replicator{} - // create collection system prefix query - prefix := core.NewReplicatorKey("") - results, err := txn.Systemstore().Query(ctx, dsq.Query{ - Prefix: prefix.ToString(), - }) - if err != nil { - return nil, err - } - - for result := range results.Next() { - var rep client.Replicator - err = json.Unmarshal(result.Value, &rep) - if err != nil { - return nil, err - } - - reps = append(reps, rep) - } - - return reps, nil -} - -func (db *db) getReplicator(ctx context.Context, info peer.AddrInfo) (client.Replicator, error) { - rep := client.Replicator{} - key := core.NewReplicatorKey(info.ID.String()) - value, err := db.systemstore().Get(ctx, key.ToDS()) - if err != nil { - return rep, err - } - - err = json.Unmarshal(value, &rep) - if err != nil { - return rep, err - } - - return rep, nil -} - -func (db *db) saveReplicator(ctx context.Context, txn datastore.Txn, rep client.Replicator) error { - key := core.NewReplicatorKey(rep.Info.ID.String()) - repBytes, err := json.Marshal(rep) - if err != nil { - return err - } - return txn.Systemstore().Put(ctx, key.ToDS(), repBytes) -} diff --git a/db/replicator_test.go b/db/replicator_test.go deleted file mode 100644 index f21ab585a9..0000000000 --- a/db/replicator_test.go +++ /dev/null @@ -1,242 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package db - -import ( - "context" - "testing" - - ds "github.com/ipfs/go-datastore" - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/sourcenetwork/defradb/client" -) - -func TestSetReplicator(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test"}, - }) - assert.NoError(t, err) -} - -func TestGetAllReplicatorsWith2Addition(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test"}, - }) - require.NoError(t, err) - - a2, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8C") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. 
- info2, err := peer.AddrInfoFromP2pAddr(a2) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - reps, err := db.GetAllReplicators(ctx) - require.NoError(t, err) - - assert.Equal(t, []client.Replicator{ - { - Info: *info, - Schemas: []string{"test"}, - }, - { - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }, - }, reps) -} - -func TestGetAllReplicatorsWith2AdditionsOnSamePeer(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test"}, - }) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - reps, err := db.GetAllReplicators(ctx) - require.NoError(t, err) - - assert.Equal(t, []client.Replicator{ - { - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }, - }, reps) -} - -func TestDeleteSchemaForReplicator(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - err = db.DeleteReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test2"}, - }) - require.NoError(t, err) - - rep, err := db.getReplicator(ctx, *info) - require.NoError(t, err) - - assert.Equal(t, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test3"}, - }, rep) -} - -func TestDeleteAllSchemasForReplicator(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - err = db.DeleteReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - _, err = db.getReplicator(ctx, *info) - require.ErrorIs(t, err, ds.ErrNotFound) -} - -func TestDeleteReplicatorWith2Addition(t *testing.T) { - ctx := context.Background() - db, err := newMemoryDB(ctx) - require.NoError(t, err) - defer db.Close(ctx) - a, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8B") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. 
- info, err := peer.AddrInfoFromP2pAddr(a) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: []string{"test"}, - }) - require.NoError(t, err) - - a2, err := ma.NewMultiaddr("/ip4/192.168.1.12/tcp/9000/p2p/12D3KooWNXm3dmrwCYSxGoRUyZstaKYiHPdt8uZH5vgVaEJyzU8C") - require.NoError(t, err) - - // Extract the peer ID from the multiaddr. - info2, err := peer.AddrInfoFromP2pAddr(a2) - require.NoError(t, err) - - err = db.SetReplicator(ctx, client.Replicator{ - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }) - require.NoError(t, err) - - reps, err := db.GetAllReplicators(ctx) - require.NoError(t, err) - - assert.Equal(t, []client.Replicator{ - { - Info: *info, - Schemas: []string{"test"}, - }, - { - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }, - }, reps) - - err = db.DeleteReplicator(ctx, client.Replicator{Info: *info}) - require.NoError(t, err) - - reps, err = db.GetAllReplicators(ctx) - require.NoError(t, err) - - assert.Equal(t, []client.Replicator{ - { - Info: *info2, - Schemas: []string{"test", "test2", "test3"}, - }, - }, reps) -} diff --git a/db/schema.go b/db/schema.go index 5c5c0568f8..df95df60e2 100644 --- a/db/schema.go +++ b/db/schema.go @@ -23,13 +23,13 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/db/description" ) const ( schemaNamePathIndex int = 0 - schemaPathIndex int = 1 - fieldsPathIndex int = 2 - fieldIndexPathIndex int = 3 + fieldsPathIndex int = 1 + fieldIndexPathIndex int = 2 ) // addSchema takes the provided schema in SDL format, and applies it to the database, @@ -39,24 +39,29 @@ func (db *db) addSchema( txn datastore.Txn, schemaString string, ) ([]client.CollectionDescription, error) { - existingDescriptions, err := db.getCollectionDescriptions(ctx, txn) + existingCollections, err := db.getAllCollections(ctx, txn) if err != nil { return nil, err } - newDescriptions, err := db.parser.ParseSDL(ctx, schemaString) + existingDefinitions := make([]client.CollectionDefinition, len(existingCollections)) + for i := range existingCollections { + existingDefinitions[i] = existingCollections[i].Definition() + } + + newDefinitions, err := db.parser.ParseSDL(ctx, schemaString) if err != nil { return nil, err } - err = db.parser.SetSchema(ctx, txn, append(existingDescriptions, newDescriptions...)) + err = db.parser.SetSchema(ctx, txn, append(existingDefinitions, newDefinitions...)) if err != nil { return nil, err } - returnDescriptions := make([]client.CollectionDescription, len(newDescriptions)) - for i, desc := range newDescriptions { - col, err := db.createCollection(ctx, txn, desc) + returnDescriptions := make([]client.CollectionDescription, len(newDefinitions)) + for i, definition := range newDefinitions { + col, err := db.createCollection(ctx, txn, definition) if err != nil { return nil, err } @@ -67,32 +72,20 @@ func (db *db) addSchema( } func (db *db) loadSchema(ctx context.Context, txn datastore.Txn) error { - descriptions, err := db.getCollectionDescriptions(ctx, txn) - if err != nil { - return err - } - - return db.parser.SetSchema(ctx, txn, descriptions) -} - -func (db *db) getCollectionDescriptions( - ctx context.Context, - txn datastore.Txn, -) ([]client.CollectionDescription, error) { collections, err := db.getAllCollections(ctx, txn) if err != nil { - return nil, err + return err } - descriptions := make([]client.CollectionDescription, len(collections)) - for i, collection := range collections 
{ - descriptions[i] = collection.Description() + definitions := make([]client.CollectionDefinition, len(collections)) + for i := range collections { + definitions[i] = collections[i].Definition() } - return descriptions, nil + return db.parser.SetSchema(ctx, txn, definitions) } -// patchSchema takes the given JSON patch string and applies it to the set of CollectionDescriptions +// patchSchema takes the given JSON patch string and applies it to the set of SchemaDescriptions // present in the database. // // It will also update the GQL types used by the query system. It will error and not apply any of the @@ -103,24 +96,29 @@ func (db *db) getCollectionDescriptions( // The collections (including the schema version ID) will only be updated if any changes have actually // been made, if the net result of the patch matches the current persisted description then no changes // will be applied. -func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString string) error { +func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString string, setAsDefaultVersion bool) error { patch, err := jsonpatch.DecodePatch([]byte(patchString)) if err != nil { return err } - collectionsByName, err := db.getCollectionsByName(ctx, txn) + schemas, err := description.GetSchemas(ctx, txn) if err != nil { return err } + existingSchemaByName := map[string]client.SchemaDescription{} + for _, schema := range schemas { + existingSchemaByName[schema.Name] = schema + } + // Here we swap out any string representations of enums for their integer values - patch, err = substituteSchemaPatch(patch, collectionsByName) + patch, err = substituteSchemaPatch(patch, existingSchemaByName) if err != nil { return err } - existingDescriptionJson, err := json.Marshal(collectionsByName) + existingDescriptionJson, err := json.Marshal(existingSchemaByName) if err != nil { return err } @@ -130,45 +128,39 @@ func (db *db) patchSchema(ctx context.Context, txn datastore.Txn, patchString st return err } - var newDescriptionsByName map[string]client.CollectionDescription + var newSchemaByName map[string]client.SchemaDescription decoder := json.NewDecoder(strings.NewReader(string(newDescriptionJson))) decoder.DisallowUnknownFields() - err = decoder.Decode(&newDescriptionsByName) + err = decoder.Decode(&newSchemaByName) if err != nil { return err } - newDescriptions := []client.CollectionDescription{} - for _, desc := range newDescriptionsByName { - newDescriptions = append(newDescriptions, desc) - } - - for i, desc := range newDescriptions { - col, err := db.updateCollection(ctx, txn, collectionsByName, newDescriptionsByName, desc) + for _, schema := range newSchemaByName { + err := db.updateSchema( + ctx, + txn, + existingSchemaByName, + newSchemaByName, + schema, + setAsDefaultVersion, + ) if err != nil { return err } - newDescriptions[i] = col.Description() } - return db.parser.SetSchema(ctx, txn, newDescriptions) -} - -func (db *db) getCollectionsByName( - ctx context.Context, - txn datastore.Txn, -) (map[string]client.CollectionDescription, error) { - collections, err := db.getAllCollections(ctx, txn) + newCollections, err := db.getAllCollections(ctx, txn) if err != nil { - return nil, err + return err } - collectionsByName := map[string]client.CollectionDescription{} - for _, collection := range collections { - collectionsByName[collection.Name()] = collection.Description() + definitions := make([]client.CollectionDefinition, len(newCollections)) + for i, col := range newCollections { + definitions[i] = 
col.Definition() } - return collectionsByName, nil + return db.parser.SetSchema(ctx, txn, definitions) } // substituteSchemaPatch handles any substitution of values that may be required before @@ -178,13 +170,13 @@ func (db *db) getCollectionsByName( // value. func substituteSchemaPatch( patch jsonpatch.Patch, - collectionsByName map[string]client.CollectionDescription, + schemaByName map[string]client.SchemaDescription, ) (jsonpatch.Patch, error) { - fieldIndexesByCollection := make(map[string]map[string]int, len(collectionsByName)) - for colName, col := range collectionsByName { - fieldIndexesByName := make(map[string]int, len(col.Schema.Fields)) - fieldIndexesByCollection[colName] = fieldIndexesByName - for i, field := range col.Schema.Fields { + fieldIndexesBySchema := make(map[string]map[string]int, len(schemaByName)) + for schemaName, schema := range schemaByName { + fieldIndexesByName := make(map[string]int, len(schema.Fields)) + fieldIndexesBySchema[schemaName] = fieldIndexesByName + for i, field := range schema.Fields { fieldIndexesByName[field.Name] = i } } @@ -227,9 +219,9 @@ func substituteSchemaPatch( newPatchValue = immutable.Some[any](field) } - desc := collectionsByName[splitPath[schemaNamePathIndex]] + desc := schemaByName[splitPath[schemaNamePathIndex]] var index string - if fieldIndexesByName, ok := fieldIndexesByCollection[desc.Name]; ok { + if fieldIndexesByName, ok := fieldIndexesBySchema[desc.Name]; ok { if i, ok := fieldIndexesByName[fieldIndexer]; ok { index = fmt.Sprint(i) } @@ -238,7 +230,7 @@ func substituteSchemaPatch( index = "-" // If this is a new field we need to track its location so that subsequent operations // within the patch may access it by field name. - fieldIndexesByCollection[desc.Name][fieldIndexer] = len(fieldIndexesByCollection[desc.Name]) + fieldIndexesBySchema[desc.Name][fieldIndexer] = len(fieldIndexesBySchema[desc.Name]) } splitPath[fieldIndexPathIndex] = index @@ -250,17 +242,17 @@ func substituteSchemaPatch( if isField { if kind, isString := field["Kind"].(string); isString { - substitute, collectionName, err := getSubstituteFieldKind(kind, collectionsByName) + substitute, schemaName, err := getSubstituteFieldKind(kind, schemaByName) if err != nil { return nil, err } field["Kind"] = substitute - if collectionName != "" { - if field["Schema"] != nil && field["Schema"] != collectionName { + if schemaName != "" { + if field["Schema"] != nil && field["Schema"] != schemaName { return nil, NewErrFieldKindDoesNotMatchFieldSchema(kind, field["Schema"].(string)) } - field["Schema"] = collectionName + field["Schema"] = schemaName } newPatchValue = immutable.Some[any](field) @@ -273,7 +265,7 @@ func substituteSchemaPatch( } if kind, isString := kind.(string); isString { - substitute, _, err := getSubstituteFieldKind(kind, collectionsByName) + substitute, _, err := getSubstituteFieldKind(kind, schemaByName) if err != nil { return nil, err } @@ -297,13 +289,44 @@ func substituteSchemaPatch( return patch, nil } +func (db *db) getSchemasByName( + ctx context.Context, + txn datastore.Txn, + name string, +) ([]client.SchemaDescription, error) { + return description.GetSchemasByName(ctx, txn, name) +} + +func (db *db) getSchemaByVersionID( + ctx context.Context, + txn datastore.Txn, + versionID string, +) (client.SchemaDescription, error) { + return description.GetSchemaVersion(ctx, txn, versionID) +} + +func (db *db) getSchemasByRoot( + ctx context.Context, + txn datastore.Txn, + root string, +) ([]client.SchemaDescription, error) { + return 
description.GetSchemasByRoot(ctx, txn, root)
+}
+
+func (db *db) getAllSchemas(
+	ctx context.Context,
+	txn datastore.Txn,
+) ([]client.SchemaDescription, error) {
+	return description.GetAllSchemas(ctx, txn)
+}
+
 // getSubstituteFieldKind checks and attempts to get the underlying integer value for the given string
 // Field Kind value. It will return the value if one is found, else returns an [ErrFieldKindNotFound].
 //
 // If the value represents a foreign relation the collection name will also be returned.
 func getSubstituteFieldKind(
 	kind string,
-	collectionsByName map[string]client.CollectionDescription,
+	schemaByName map[string]client.SchemaDescription,
 ) (client.FieldKind, string, error) {
 	substitute, substituteFound := client.FieldKindStringToEnumMapping[kind]
 	if substituteFound {
@@ -319,7 +342,7 @@
 		substitute = client.FieldKind_FOREIGN_OBJECT
 	}
 
-	if _, substituteFound := collectionsByName[collectionName]; substituteFound {
+	if _, substituteFound := schemaByName[collectionName]; substituteFound {
 		return substitute, collectionName, nil
 	}
 
@@ -330,20 +353,19 @@
 // isFieldOrInner returns true if the given path points to a FieldDescription or a property within it.
 func isFieldOrInner(path []string) bool {
 	//nolint:goconst
-	return len(path) >= 4 && path[fieldsPathIndex] == "Fields" && path[schemaPathIndex] == "Schema"
+	return len(path) >= 3 && path[fieldsPathIndex] == "Fields"
 }
 
 // isField returns true if the given path points to a FieldDescription.
 func isField(path []string) bool {
-	return len(path) == 4 && path[fieldsPathIndex] == "Fields" && path[schemaPathIndex] == "Schema"
+	return len(path) == 3 && path[fieldsPathIndex] == "Fields"
 }
 
 // isField returns true if the given path points to a FieldDescription.Kind property.
 func isFieldKind(path []string) bool {
-	return len(path) == 5 &&
+	return len(path) == 4 &&
 		path[fieldIndexPathIndex+1] == "Kind" &&
-		path[fieldsPathIndex] == "Fields" &&
-		path[schemaPathIndex] == "Schema"
+		path[fieldsPathIndex] == "Fields"
 }
 
 // containsLetter returns true if the string contains a single unicode character.
diff --git a/db/txn_db.go b/db/txn_db.go
index b307d96e35..380cfeed34 100644
--- a/db/txn_db.go
+++ b/db/txn_db.go
@@ -79,112 +79,172 @@ func (db *explicitTxnDB) GetCollectionByName(ctx context.Context, name string) (
 	return db.getCollectionByName(ctx, db.txn, name)
 }
 
-// GetCollectionBySchemaID returns an existing collection using the schema hash ID.
-func (db *implicitTxnDB) GetCollectionBySchemaID(
+// GetCollectionsBySchemaRoot attempts to retrieve all collections using the given schema root.
+//
+// If no matching collection is found an empty set will be returned.
+func (db *implicitTxnDB) GetCollectionsBySchemaRoot(
 	ctx context.Context,
-	schemaID string,
-) (client.Collection, error) {
+	schemaRoot string,
+) ([]client.Collection, error) {
 	txn, err := db.NewTxn(ctx, true)
 	if err != nil {
 		return nil, err
 	}
 	defer txn.Discard(ctx)
 
-	return db.getCollectionBySchemaID(ctx, txn, schemaID)
+	cols, err := db.getCollectionsBySchemaRoot(ctx, txn, schemaRoot)
+	if err != nil {
+		return nil, err
+	}
+
+	return cols, nil
 }
 
-// GetCollectionBySchemaID returns an existing collection using the schema hash ID.
-func (db *explicitTxnDB) GetCollectionBySchemaID(
+// GetCollectionsBySchemaRoot attempts to retrieve all collections using the given schema root.
+//
+// If no matching collection is found an empty set will be returned.
+func (db *explicitTxnDB) GetCollectionsBySchemaRoot(
 	ctx context.Context,
-	schemaID string,
-) (client.Collection, error) {
-	return db.getCollectionBySchemaID(ctx, db.txn, schemaID)
+	schemaRoot string,
+) ([]client.Collection, error) {
+	cols, err := db.getCollectionsBySchemaRoot(ctx, db.txn, schemaRoot)
+	if err != nil {
+		return nil, err
+	}
+
+	return cols, nil
 }
 
-// GetCollectionByVersionID returns an existing collection using the schema version hash ID.
-func (db *implicitTxnDB) GetCollectionByVersionID(
+// GetCollectionsByVersionID attempts to retrieve all collections using the given schema version ID.
+//
+// If no matching collections are found an empty set will be returned.
+func (db *implicitTxnDB) GetCollectionsByVersionID(
 	ctx context.Context,
 	schemaVersionID string,
-) (client.Collection, error) {
+) ([]client.Collection, error) {
 	txn, err := db.NewTxn(ctx, true)
 	if err != nil {
 		return nil, err
 	}
 	defer txn.Discard(ctx)
 
-	return db.getCollectionByVersionID(ctx, txn, schemaVersionID)
+	cols, err := db.getCollectionsByVersionID(ctx, txn, schemaVersionID)
+	if err != nil {
+		return nil, err
+	}
+
+	collections := make([]client.Collection, len(cols))
+	for i, col := range cols {
+		collections[i] = col
+	}
+
+	return collections, nil
 }
 
-// GetCollectionByVersionID returns an existing collection using the schema version hash ID.
-func (db *explicitTxnDB) GetCollectionByVersionID(
+// GetCollectionsByVersionID attempts to retrieve all collections using the given schema version ID.
+//
+// If no matching collections are found an empty set will be returned.
+func (db *explicitTxnDB) GetCollectionsByVersionID(
 	ctx context.Context,
 	schemaVersionID string,
-) (client.Collection, error) {
-	return db.getCollectionByVersionID(ctx, db.txn, schemaVersionID)
+) ([]client.Collection, error) {
+	cols, err := db.getCollectionsByVersionID(ctx, db.txn, schemaVersionID)
+	if err != nil {
+		return nil, err
+	}
+
+	collections := make([]client.Collection, len(cols))
+	for i, col := range cols {
+		collections[i] = col
+	}
+
+	return collections, nil
 }
 
-// AddP2PCollection adds the given collection ID that the P2P system
-// subscribes to to the the persisted list. It will error if the provided
-// collection ID is invalid.
-func (db *implicitTxnDB) AddP2PCollection(ctx context.Context, collectionID string) error {
-	txn, err := db.NewTxn(ctx, false)
+// GetAllCollections gets all the currently defined collections.
+func (db *implicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) {
+	txn, err := db.NewTxn(ctx, true)
 	if err != nil {
-		return err
+		return nil, err
 	}
 	defer txn.Discard(ctx)
 
-	err = db.addP2PCollection(ctx, txn, collectionID)
+	return db.getAllCollections(ctx, txn)
+}
+
+// GetAllCollections gets all the currently defined collections.
+func (db *explicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) {
+	return db.getAllCollections(ctx, db.txn)
+}
+
+// GetSchemasByName returns all schema versions with the given name.
+func (db *implicitTxnDB) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) {
+	txn, err := db.NewTxn(ctx, true)
 	if err != nil {
-		return err
+		return nil, err
 	}
+	defer txn.Discard(ctx)
 
-	return txn.Commit(ctx)
+	return db.getSchemasByName(ctx, txn, name)
 }
 
-// AddP2PCollection adds the given collection ID that the P2P system
-// subscribes to to the the persisted list. It will error if the provided
-// collection ID is invalid.
-func (db *explicitTxnDB) AddP2PCollection(ctx context.Context, collectionID string) error { - return db.addP2PCollection(ctx, db.txn, collectionID) +// GetSchemasByName returns all schema versions with the given name. +func (db *explicitTxnDB) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) { + return db.getSchemasByName(ctx, db.txn, name) } -// RemoveP2PCollection removes the given collection ID that the P2P system -// subscribes to from the the persisted list. It will error if the provided -// collection ID is invalid. -func (db *implicitTxnDB) RemoveP2PCollection(ctx context.Context, collectionID string) error { - txn, err := db.NewTxn(ctx, false) +// GetSchemaByVersionID returns the schema description for the schema version with the +// ID provided. +// +// Will return an error if it is not found. +func (db *implicitTxnDB) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) { + txn, err := db.NewTxn(ctx, true) if err != nil { - return err + return client.SchemaDescription{}, err } defer txn.Discard(ctx) - err = db.removeP2PCollection(ctx, txn, collectionID) + return db.getSchemaByVersionID(ctx, txn, versionID) +} + +// GetSchemaByVersionID returns the schema description for the schema version with the +// ID provided. +// +// Will return an error if it is not found. +func (db *explicitTxnDB) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) { + return db.getSchemaByVersionID(ctx, db.txn, versionID) +} + +// GetSchemasByRoot returns all schema versions for the given root. +func (db *implicitTxnDB) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) { + txn, err := db.NewTxn(ctx, true) if err != nil { - return err + return nil, err } + defer txn.Discard(ctx) - return txn.Commit(ctx) + return db.getSchemasByRoot(ctx, txn, root) } -// RemoveP2PCollection removes the given collection ID that the P2P system -// subscribes to from the the persisted list. It will error if the provided -// collection ID is invalid. -func (db *explicitTxnDB) RemoveP2PCollection(ctx context.Context, collectionID string) error { - return db.removeP2PCollection(ctx, db.txn, collectionID) +// GetSchemasByRoot returns all schema versions for the given root. +func (db *explicitTxnDB) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) { + return db.getSchemasByRoot(ctx, db.txn, root) } -// GetAllCollections gets all the currently defined collections. -func (db *implicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) { +// GetAllSchemas returns all schema versions that currently exist within +// this [Store]. +func (db *implicitTxnDB) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) { txn, err := db.NewTxn(ctx, true) if err != nil { return nil, err } defer txn.Discard(ctx) - return db.getAllCollections(ctx, txn) + return db.getAllSchemas(ctx, txn) } -// GetAllCollections gets all the currently defined collections. -func (db *explicitTxnDB) GetAllCollections(ctx context.Context) ([]client.Collection, error) { - return db.getAllCollections(ctx, db.txn) +// GetAllSchemas returns all schema versions that currently exist within +// this [Store]. +func (db *explicitTxnDB) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) { + return db.getAllSchemas(ctx, db.txn) } // GetAllIndexes gets all the indexes in the database.
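As a rough illustration of the schema-introspection surface introduced by the hunks above (a sketch only: it assumes a value satisfying the updated `client.DB` interface is already in hand, `bae123` is a placeholder root, and the `Name`, `Root`, and `VersionID` fields on `client.SchemaDescription` plus `Collection.Name()` are assumed from their use elsewhere in the codebase; the method names and return types are the ones added in this diff):

```go
package example

import (
	"context"
	"fmt"

	"github.com/sourcenetwork/defradb/client"
)

// describeSchemas walks the new set-returning getters added in this diff.
func describeSchemas(ctx context.Context, db client.DB) error {
	// Every schema version currently known to the store.
	schemas, err := db.GetAllSchemas(ctx)
	if err != nil {
		return err
	}
	for _, s := range schemas {
		fmt.Println(s.Name, s.Root, s.VersionID)
	}

	// All collections backed by a given schema root; an empty slice,
	// not an error, is returned when nothing matches.
	cols, err := db.GetCollectionsBySchemaRoot(ctx, "bae123")
	if err != nil {
		return err
	}
	for _, col := range cols {
		fmt.Println(col.Name())
	}
	return nil
}
```

The empty-set-instead-of-error convention lets callers range over results without a sentinel check; only `GetSchemaByVersionID`, which returns a single description, errors on a miss.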
@@ -250,14 +310,14 @@ func (db *explicitTxnDB) AddSchema(ctx context.Context, schemaString string) ([] // The collections (including the schema version ID) will only be updated if any changes have actually // been made, if the net result of the patch matches the current persisted description then no changes // will be applied. -func (db *implicitTxnDB) PatchSchema(ctx context.Context, patchString string) error { +func (db *implicitTxnDB) PatchSchema(ctx context.Context, patchString string, setAsDefaultVersion bool) error { txn, err := db.NewTxn(ctx, false) if err != nil { return err } defer txn.Discard(ctx) - err = db.patchSchema(ctx, txn, patchString) + err = db.patchSchema(ctx, txn, patchString, setAsDefaultVersion) if err != nil { return err } @@ -276,38 +336,18 @@ func (db *implicitTxnDB) PatchSchema(ctx context.Context, patchString string) er // The collections (including the schema version ID) will only be updated if any changes have actually // been made, if the net result of the patch matches the current persisted description then no changes // will be applied. -func (db *explicitTxnDB) PatchSchema(ctx context.Context, patchString string) error { - return db.patchSchema(ctx, db.txn, patchString) -} - -func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { - txn, err := db.NewTxn(ctx, false) - if err != nil { - return err - } - defer txn.Discard(ctx) - - err = db.lensRegistry.SetMigration(ctx, cfg) - if err != nil { - return err - } - - return txn.Commit(ctx) -} - -func (db *explicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { - return db.lensRegistry.SetMigration(ctx, cfg) +func (db *explicitTxnDB) PatchSchema(ctx context.Context, patchString string, setAsDefaultVersion bool) error { + return db.patchSchema(ctx, db.txn, patchString, setAsDefaultVersion) } -// SetReplicator adds a new replicator to the database. -func (db *implicitTxnDB) SetReplicator(ctx context.Context, rep client.Replicator) error { +func (db *implicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { txn, err := db.NewTxn(ctx, false) if err != nil { return err } defer txn.Discard(ctx) - err = db.setReplicator(ctx, txn, rep) + err = db.setDefaultSchemaVersion(ctx, txn, schemaVersionID) if err != nil { return err } @@ -315,20 +355,18 @@ func (db *implicitTxnDB) SetReplicator(ctx context.Context, rep client.Replicato return txn.Commit(ctx) } -// SetReplicator adds a new replicator to the database. -func (db *explicitTxnDB) SetReplicator(ctx context.Context, rep client.Replicator) error { - return db.setReplicator(ctx, db.txn, rep) +func (db *explicitTxnDB) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { + return db.setDefaultSchemaVersion(ctx, db.txn, schemaVersionID) } -// DeleteReplicator removes a replicator from the database. -func (db *implicitTxnDB) DeleteReplicator(ctx context.Context, rep client.Replicator) error { +func (db *implicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { txn, err := db.NewTxn(ctx, false) if err != nil { return err } defer txn.Discard(ctx) - err = db.deleteReplicator(ctx, txn, rep) + err = db.lensRegistry.SetMigration(ctx, cfg) if err != nil { return err } @@ -336,43 +374,8 @@ func (db *implicitTxnDB) DeleteReplicator(ctx context.Context, rep client.Replic return txn.Commit(ctx) } -// DeleteReplicator removes a replicator from the database. 
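For the schema-mutation side changed above, here is a minimal sketch of how a caller might drive `PatchSchema` and `SetDefaultSchemaVersion` (the patch path, the `Users` collection and `email` field, and the `bae123` version ID are all placeholders; only the two method signatures come from this diff):

```go
package example

import (
	"context"

	"github.com/sourcenetwork/defradb/client"
)

// addEmailField patches a hypothetical Users schema without switching
// the active version, then promotes a specific version explicitly.
func addEmailField(ctx context.Context, db client.DB) error {
	// setAsDefaultVersion=false records the new schema version but
	// leaves the currently active version in place.
	patch := `[{"op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"}}]`
	if err := db.PatchSchema(ctx, patch, false); err != nil {
		return err
	}

	// Later (for example once a Lens migration has been registered via
	// SetMigration), pin the default schema version by its ID.
	return db.SetDefaultSchemaVersion(ctx, "bae123")
}
```

Passing `true` instead collapses the two steps: the patched schema version becomes the default as soon as the transaction commits.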
-func (db *explicitTxnDB) DeleteReplicator(ctx context.Context, rep client.Replicator) error { - return db.deleteReplicator(ctx, db.txn, rep) -} - -// GetAllReplicators returns all replicators of the database. -func (db *implicitTxnDB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - txn, err := db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - defer txn.Discard(ctx) - - return db.getAllReplicators(ctx, txn) -} - -// GetAllReplicators returns all replicators of the database. -func (db *explicitTxnDB) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - return db.getAllReplicators(ctx, db.txn) -} - -// GetAllP2PCollections returns the list of persisted collection IDs that -// the P2P system subscribes to. -func (db *implicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, error) { - txn, err := db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - defer txn.Discard(ctx) - - return db.getAllP2PCollections(ctx, txn) -} - -// GetAllP2PCollections returns the list of persisted collection IDs that -// the P2P system subscribes to. -func (db *explicitTxnDB) GetAllP2PCollections(ctx context.Context) ([]string, error) { - return db.getAllP2PCollections(ctx, db.txn) +func (db *explicitTxnDB) SetMigration(ctx context.Context, cfg client.LensConfig) error { + return db.lensRegistry.SetMigration(ctx, cfg) } // BasicImport imports a json dataset. diff --git a/docs/cli/defradb_client.md b/docs/cli/defradb_client.md index 7173befb6b..a52fce09f3 100644 --- a/docs/cli/defradb_client.md +++ b/docs/cli/defradb_client.md @@ -10,7 +10,8 @@ Execute queries, add schema types, obtain node info, etc. ### Options ``` - -h, --help help for client + -h, --help help for client + --tx uint Transaction ID ``` ### Options inherited from parent commands @@ -30,12 +31,11 @@ Execute queries, add schema types, obtain node info, etc. * [defradb](defradb.md) - DefraDB Edge Database * [defradb client backup](defradb_client_backup.md) - Interact with the backup utility -* [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore +* [defradb client collection](defradb_client_collection.md) - Interact with a collection. * [defradb client dump](defradb_client_dump.md) - Dump the contents of DefraDB node-side * [defradb client index](defradb_client_index.md) - Manage collections' indexes of a running DefraDB instance -* [defradb client peerid](defradb_client_peerid.md) - Get the PeerID of the node -* [defradb client ping](defradb_client_ping.md) - Ping to test connection with a node +* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system * [defradb client query](defradb_client_query.md) - Send a DefraDB GraphQL query request -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC * [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node +* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions diff --git a/docs/cli/defradb_client_backup.md b/docs/cli/defradb_client_backup.md index baa08725e1..77e111795d 100644 --- a/docs/cli/defradb_client_backup.md +++ b/docs/cli/defradb_client_backup.md @@ -23,6 +23,7 @@ Currently only supports JSON format. 
--logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_backup_export.md b/docs/cli/defradb_client_backup_export.md index ea8a22d634..b7547ea641 100644 --- a/docs/cli/defradb_client_backup_export.md +++ b/docs/cli/defradb_client_backup_export.md @@ -37,6 +37,7 @@ defradb client backup export [-c --collections | -p --pretty | -f --format] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_collection.md b/docs/cli/defradb_client_collection.md new file mode 100644 index 0000000000..7807f49503 --- /dev/null +++ b/docs/cli/defradb_client_collection.md @@ -0,0 +1,41 @@ +## defradb client collection + +Interact with a collection. + +### Synopsis + +Create, read, update, and delete documents within a collection. + +### Options + +``` + -h, --help help for collection + --name string Collection name + --schema string Collection schema Root + --tx uint Transaction ID + --version string Collection version ID +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client collection create](defradb_client_collection_create.md) - Create a new document. +* [defradb client collection delete](defradb_client_collection_delete.md) - Delete documents by key or filter. +* [defradb client collection describe](defradb_client_collection_describe.md) - View collection description. +* [defradb client collection get](defradb_client_collection_get.md) - View document fields. +* [defradb client collection keys](defradb_client_collection_keys.md) - List all document keys. +* [defradb client collection update](defradb_client_collection_update.md) - Update documents by key or filter. + diff --git a/docs/cli/defradb_client_collection_create.md b/docs/cli/defradb_client_collection_create.md new file mode 100644 index 0000000000..42b53b37af --- /dev/null +++ b/docs/cli/defradb_client_collection_create.md @@ -0,0 +1,53 @@ +## defradb client collection create + +Create a new document. + +### Synopsis + +Create a new document. 
+ +Example: create from string + defradb client collection create --name User '{ "name": "Bob" }' + +Example: create multiple from string + defradb client collection create --name User '[{ "name": "Alice" }, { "name": "Bob" }]' + +Example: create from file + defradb client collection create --name User -f document.json + +Example: create from stdin + cat document.json | defradb client collection create --name User - + + +``` +defradb client collection create [flags] +``` + +### Options + +``` + -f, --file string File containing document(s) + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --name string Collection name + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --version string Collection version ID +``` + +### SEE ALSO + +* [defradb client collection](defradb_client_collection.md) - Interact with a collection. + diff --git a/docs/cli/defradb_client_collection_delete.md b/docs/cli/defradb_client_collection_delete.md new file mode 100644 index 0000000000..fea6c6ccc7 --- /dev/null +++ b/docs/cli/defradb_client_collection_delete.md @@ -0,0 +1,48 @@ +## defradb client collection delete + +Delete documents by key or filter. + +### Synopsis + +Delete documents by key or filter and list the number of documents deleted. + +Example: delete by key(s) + defradb client collection delete --name User --key bae-123,bae-456 + +Example: delete by filter + defradb client collection delete --name User --filter '{ "_gte": { "points": 100 } }' + + +``` +defradb client collection delete [--filter <filter> --key <key>] [flags] +``` + +### Options + +``` + --filter string Document filter + -h, --help help for delete + --key strings Document key +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --name string Collection name + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --version string Collection version ID +``` + +### SEE ALSO + +* [defradb client collection](defradb_client_collection.md) - Interact with a collection. + diff --git a/docs/cli/defradb_client_collection_describe.md b/docs/cli/defradb_client_collection_describe.md new file mode 100644 index 0000000000..88e2427bed --- /dev/null +++ b/docs/cli/defradb_client_collection_describe.md @@ -0,0 +1,52 @@ +## defradb client collection describe + +View collection description.
+ +### Synopsis + +Introspect collection types. + +Example: view all collections + defradb client collection describe + +Example: view collection by name + defradb client collection describe --name User + +Example: view collection by schema id + defradb client collection describe --schema bae123 + +Example: view collection by version id + defradb client collection describe --version bae123 + + +``` +defradb client collection describe [flags] +``` + +### Options + +``` + -h, --help help for describe +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --name string Collection name + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --version string Collection version ID +``` + +### SEE ALSO + +* [defradb client collection](defradb_client_collection.md) - Interact with a collection. + diff --git a/docs/cli/defradb_client_collection_get.md b/docs/cli/defradb_client_collection_get.md new file mode 100644 index 0000000000..675988c487 --- /dev/null +++ b/docs/cli/defradb_client_collection_get.md @@ -0,0 +1,44 @@ +## defradb client collection get + +View document fields. + +### Synopsis + +View document fields. + +Example: + defradb client collection get --name User bae-123 + + +``` +defradb client collection get [--show-deleted] [flags] +``` + +### Options + +``` + -h, --help help for get + --show-deleted Show deleted documents +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --name string Collection name + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --version string Collection version ID +``` + +### SEE ALSO + +* [defradb client collection](defradb_client_collection.md) - Interact with a collection. + diff --git a/docs/cli/defradb_client_collection_keys.md b/docs/cli/defradb_client_collection_keys.md new file mode 100644 index 0000000000..234d8e051f --- /dev/null +++ b/docs/cli/defradb_client_collection_keys.md @@ -0,0 +1,43 @@ +## defradb client collection keys + +List all document keys. + +### Synopsis + +List all document keys. + +Example: + defradb client collection keys --name User + + +``` +defradb client collection keys [flags] +``` + +### Options + +``` + -h, --help help for keys +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. 
Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --name string Collection name + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --version string Collection version ID +``` + +### SEE ALSO + +* [defradb client collection](defradb_client_collection.md) - Interact with a collection. + diff --git a/docs/cli/defradb_client_collection_update.md b/docs/cli/defradb_client_collection_update.md new file mode 100644 index 0000000000..c081614cce --- /dev/null +++ b/docs/cli/defradb_client_collection_update.md @@ -0,0 +1,54 @@ +## defradb client collection update + +Update documents by key or filter. + +### Synopsis + +Update documents by key or filter. + +Example: update from string + defradb client collection update --name User --key bae-123 '{ "name": "Bob" }' + +Example: update by filter + defradb client collection update --name User \ + --filter '{ "_gte": { "points": 100 } }' --updater '{ "verified": true }' + +Example: update by keys + defradb client collection update --name User \ + --key bae-123,bae-456 --updater '{ "verified": true }' + + +``` +defradb client collection update [--filter <filter> --key <key> --updater <updater>] [flags] +``` + +### Options + +``` + --filter string Document filter + -h, --help help for update + --key strings Document key + --updater string Document updater +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --name string Collection name + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --schema string Collection schema Root + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") + --version string Collection version ID +``` + +### SEE ALSO + +* [defradb client collection](defradb_client_collection.md) - Interact with a collection. + diff --git a/docs/cli/defradb_client_document.md b/docs/cli/defradb_client_document.md new file mode 100644 index 0000000000..bc527357e7 --- /dev/null +++ b/docs/cli/defradb_client_document.md @@ -0,0 +1,38 @@ +## defradb client document + +Create, read, update, and delete documents. + +### Synopsis + +Create, read, update, and delete documents. + +### Options + +``` + -h, --help help for document +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use.
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client document create](defradb_client_document_create.md) - Create a new document. +* [defradb client document delete](defradb_client_document_delete.md) - Delete documents by key or filter. +* [defradb client document get](defradb_client_document_get.md) - View detailed document info. +* [defradb client document keys](defradb_client_document_keys.md) - List all collection document keys. +* [defradb client document save](defradb_client_document_save.md) - Create or update a document. +* [defradb client document update](defradb_client_document_update.md) - Update documents by key or filter. + diff --git a/docs/cli/defradb_client_document_create.md b/docs/cli/defradb_client_document_create.md new file mode 100644 index 0000000000..99dbd0d7f5 --- /dev/null +++ b/docs/cli/defradb_client_document_create.md @@ -0,0 +1,44 @@ +## defradb client document create + +Create a new document. + +### Synopsis + +Create a new document. + +Example: create document + defradb client document create --collection User '{ "name": "Bob" }' + +Example: create documents + defradb client document create --collection User '[{ "name": "Alice" }, { "name": "Bob" }]' + + +``` +defradb client document create --collection <collection> [flags] +``` + +### Options + +``` + -c, --collection string Collection name + -h, --help help for create +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_document_delete.md b/docs/cli/defradb_client_document_delete.md new file mode 100644 index 0000000000..96a0b1e973 --- /dev/null +++ b/docs/cli/defradb_client_document_delete.md @@ -0,0 +1,46 @@ +## defradb client document delete + +Delete documents by key or filter. + +### Synopsis + +Delete documents by key or filter and list the number of documents deleted.
+ +Example: delete by key(s) + defradb client document delete --collection User --key bae-123,bae-456 + +Example: delete by filter + defradb client document delete --collection User --filter '{ "_gte": { "points": 100 } }' + + +``` +defradb client document delete --collection <collection> [--filter <filter> --key <key>] [flags] +``` + +### Options + +``` + -c, --collection string Collection name + --filter string Document filter + -h, --help help for delete + --key strings Document key +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_document_get.md b/docs/cli/defradb_client_document_get.md new file mode 100644 index 0000000000..600712ec0b --- /dev/null +++ b/docs/cli/defradb_client_document_get.md @@ -0,0 +1,42 @@ +## defradb client document get + +View detailed document info. + +### Synopsis + +View detailed document info. + +Example: + defradb client document get --collection User bae-123 + + +``` +defradb client document get --collection <collection> [--show-deleted] [flags] +``` + +### Options + +``` + -c, --collection string Collection name + -h, --help help for get + --show-deleted Show deleted documents +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_document_keys.md b/docs/cli/defradb_client_document_keys.md new file mode 100644 index 0000000000..e436f4df6b --- /dev/null +++ b/docs/cli/defradb_client_document_keys.md @@ -0,0 +1,41 @@ +## defradb client document keys + +List all collection document keys. + +### Synopsis + +List all collection document keys. + +Example: + defradb client document keys --collection User + + +``` +defradb client document keys --collection <collection> [flags] +``` + +### Options + +``` + -c, --collection string Collection name + -h, --help help for keys +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use.
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_document_save.md b/docs/cli/defradb_client_document_save.md new file mode 100644 index 0000000000..41f59a860c --- /dev/null +++ b/docs/cli/defradb_client_document_save.md @@ -0,0 +1,42 @@ +## defradb client document save + +Create or update a document. + +### Synopsis + +Create or update a document. + +Example: + defradb client document save --collection User --key bae-123 '{ "name": "Bob" }' + + +``` +defradb client document save --collection <collection> --key <key> [flags] +``` + +### Options + +``` + -c, --collection string Collection name + -h, --help help for save + --key string Document key +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_document_update.md b/docs/cli/defradb_client_document_update.md new file mode 100644 index 0000000000..3efc67ebf0 --- /dev/null +++ b/docs/cli/defradb_client_document_update.md @@ -0,0 +1,52 @@ +## defradb client document update + +Update documents by key or filter. + +### Synopsis + +Update documents by key or filter. + +Example: + defradb client document update --collection User --key bae-123 '{ "name": "Bob" }' + +Example: update by filter + defradb client document update --collection User \ + --filter '{ "_gte": { "points": 100 } }' --updater '{ "verified": true }' + +Example: update by keys + defradb client document update --collection User \ + --key bae-123,bae-456 --updater '{ "verified": true }' + + +``` +defradb client document update --collection <collection> [--filter <filter> --key <key> --updater <updater>] [flags] +``` + +### Options + +``` + -c, --collection string Collection name + --filter string Document filter + -h, --help help for update + --key strings Document key + --updater string Document updater +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use.
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client document](defradb_client_document.md) - Create, read, update, and delete documents. + diff --git a/docs/cli/defradb_client_dump.md b/docs/cli/defradb_client_dump.md index 862154bc17..3ebd35343c 100644 --- a/docs/cli/defradb_client_dump.md +++ b/docs/cli/defradb_client_dump.md @@ -22,6 +22,7 @@ defradb client dump [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_index.md b/docs/cli/defradb_client_index.md index 4babb57d46..a876bbcc4f 100644 --- a/docs/cli/defradb_client_index.md +++ b/docs/cli/defradb_client_index.md @@ -22,6 +22,7 @@ Manage (create, drop, or list) collection indexes on a DefraDB node. --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_index_create.md b/docs/cli/defradb_client_index_create.md index 7f67e58075..96b6418440 100644 --- a/docs/cli/defradb_client_index_create.md +++ b/docs/cli/defradb_client_index_create.md @@ -22,7 +22,7 @@ defradb client index create -c --collection --fields [-n - ``` -c, --collection string Collection name - --fields string Fields to index + --fields strings Fields to index -h, --help help for create -n, --name string Index name ``` @@ -37,6 +37,7 @@ defradb client index create -c --collection --fields [-n - --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_index_drop.md b/docs/cli/defradb_client_index_drop.md index f551fe4658..c5171b756e 100644 --- a/docs/cli/defradb_client_index_drop.md +++ b/docs/cli/defradb_client_index_drop.md @@ -31,6 +31,7 @@ defradb client index drop -c --collection -n --name [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_index_list.md b/docs/cli/defradb_client_index_list.md index bf434d30f2..c7e96d4e4f 100644 --- a/docs/cli/defradb_client_index_list.md +++ b/docs/cli/defradb_client_index_list.md @@ -33,6 +33,7 @@ defradb client index list [-c --collection ] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs 
--rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_rpc.md b/docs/cli/defradb_client_p2p.md similarity index 67% rename from docs/cli/defradb_client_rpc.md rename to docs/cli/defradb_client_p2p.md index d7046433c5..386929950d 100644 --- a/docs/cli/defradb_client_rpc.md +++ b/docs/cli/defradb_client_p2p.md @@ -1,16 +1,15 @@ -## defradb client rpc +## defradb client p2p -Interact with a DefraDB node via RPC +Interact with the DefraDB P2P system ### Synopsis -Interact with a DefraDB node via RPC. +Interact with the DefraDB P2P system ### Options ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") - -h, --help help for rpc + -h, --help help for p2p ``` ### Options inherited from parent commands @@ -23,12 +22,14 @@ Interact with a DefraDB node via RPC. --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO * [defradb client](defradb_client.md) - Interact with a DefraDB node -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system +* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system +* [defradb client p2p info](defradb_client_p2p_info.md) - Get peer info from a DefraDB node +* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_p2pcollection.md b/docs/cli/defradb_client_p2p_collection.md similarity index 62% rename from docs/cli/defradb_client_rpc_p2pcollection.md rename to docs/cli/defradb_client_p2p_collection.md index ede32521d4..6fec3171da 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection.md +++ b/docs/cli/defradb_client_p2p_collection.md @@ -1,4 +1,4 @@ -## defradb client rpc p2pcollection +## defradb client p2p collection Configure the P2P collection system @@ -10,13 +10,12 @@ The selected collections synchronize their events on the pubsub network. ### Options ``` - -h, --help help for p2pcollection + -h, --help help for collection ``` ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -24,13 +23,14 @@ The selected collections synchronize their events on the pubsub network. 
--logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC -* [defradb client rpc p2pcollection add](defradb_client_rpc_p2pcollection_add.md) - Add P2P collections -* [defradb client rpc p2pcollection getall](defradb_client_rpc_p2pcollection_getall.md) - Get all P2P collections -* [defradb client rpc p2pcollection remove](defradb_client_rpc_p2pcollection_remove.md) - Remove P2P collections +* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system +* [defradb client p2p collection add](defradb_client_p2p_collection_add.md) - Add P2P collections +* [defradb client p2p collection getall](defradb_client_p2p_collection_getall.md) - Get all P2P collections +* [defradb client p2p collection remove](defradb_client_p2p_collection_remove.md) - Remove P2P collections diff --git a/docs/cli/defradb_client_rpc_p2pcollection_add.md b/docs/cli/defradb_client_p2p_collection_add.md similarity index 70% rename from docs/cli/defradb_client_rpc_p2pcollection_add.md rename to docs/cli/defradb_client_p2p_collection_add.md index 92ac0d82e6..c55c807404 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_add.md +++ b/docs/cli/defradb_client_p2p_collection_add.md @@ -1,4 +1,4 @@ -## defradb client rpc p2pcollection add +## defradb client p2p collection add Add P2P collections @@ -7,8 +7,15 @@ Add P2P collections Add P2P collections to the synchronized pubsub topics. The collections are synchronized between nodes of a pubsub network. +Example: add single collection + defradb client p2p collection add bae123 + +Example: add multiple collections + defradb client p2p collection add bae123,bae456 + + ``` -defradb client rpc p2pcollection add [collectionID] [flags] +defradb client p2p collection add [collectionIDs] [flags] ``` ### Options @@ -20,7 +27,6 @@ defradb client rpc p2pcollection add [collectionID] [flags] ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") @@ -28,10 +34,11 @@ defradb client rpc p2pcollection add [collectionID] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system +* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_p2pcollection_getall.md b/docs/cli/defradb_client_p2p_collection_getall.md similarity index 78% rename from docs/cli/defradb_client_rpc_p2pcollection_getall.md rename to docs/cli/defradb_client_p2p_collection_getall.md index 946a2e0156..46fcefc407 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_getall.md +++ b/docs/cli/defradb_client_p2p_collection_getall.md @@ -1,4 +1,4 @@ -## defradb client rpc p2pcollection getall +## defradb client p2p collection getall Get all P2P collections @@ -8,7 +8,7 @@ Get all P2P collections in the pubsub topics. This is the list of collections of the node that are synchronized on the pubsub network. ``` -defradb client rpc p2pcollection getall [flags] +defradb client p2p collection getall [flags] ``` ### Options @@ -20,7 +20,6 @@ defradb client rpc p2pcollection getall [flags] ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -28,10 +27,11 @@ defradb client rpc p2pcollection getall [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system +* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_rpc_p2pcollection_remove.md b/docs/cli/defradb_client_p2p_collection_remove.md similarity index 69% rename from docs/cli/defradb_client_rpc_p2pcollection_remove.md rename to docs/cli/defradb_client_p2p_collection_remove.md index 77658b4d50..2aaa0901b4 100644 --- a/docs/cli/defradb_client_rpc_p2pcollection_remove.md +++ b/docs/cli/defradb_client_p2p_collection_remove.md @@ -1,4 +1,4 @@ -## defradb client rpc p2pcollection remove +## defradb client p2p collection remove Remove P2P collections @@ -7,8 +7,15 @@ Remove P2P collections Remove P2P collections from the followed pubsub topics. The removed collections will no longer be synchronized between nodes. 
+Example: remove single collection + defradb client p2p collection remove bae123 + +Example: remove multiple collections + defradb client p2p collection remove bae123,bae456 + + ``` -defradb client rpc p2pcollection remove [collectionID] [flags] +defradb client p2p collection remove [collectionIDs] [flags] ``` ### Options @@ -20,7 +27,6 @@ defradb client rpc p2pcollection remove [collectionID] [flags] ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -28,10 +34,11 @@ defradb client rpc p2pcollection remove [collectionID] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc p2pcollection](defradb_client_rpc_p2pcollection.md) - Configure the P2P collection system +* [defradb client p2p collection](defradb_client_p2p_collection.md) - Configure the P2P collection system diff --git a/docs/cli/defradb_client_peerid.md b/docs/cli/defradb_client_p2p_info.md similarity index 73% rename from docs/cli/defradb_client_peerid.md rename to docs/cli/defradb_client_p2p_info.md index f4596111c8..793179f3d7 100644 --- a/docs/cli/defradb_client_peerid.md +++ b/docs/cli/defradb_client_p2p_info.md @@ -1,19 +1,19 @@ -## defradb client peerid +## defradb client p2p info -Get the PeerID of the node +Get peer info from a DefraDB node ### Synopsis -Get the PeerID of the node. +Get peer info from a DefraDB node ``` -defradb client peerid [flags] +defradb client p2p info [flags] ``` ### Options ``` - -h, --help help for peerid + -h, --help help for info ``` ### Options inherited from parent commands @@ -26,10 +26,11 @@ defradb client peerid [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system diff --git a/docs/cli/defradb_client_rpc_replicator.md b/docs/cli/defradb_client_p2p_replicator.md similarity index 68% rename from docs/cli/defradb_client_rpc_replicator.md rename to docs/cli/defradb_client_p2p_replicator.md index e88933791c..4aa4eb9996 100644 --- a/docs/cli/defradb_client_rpc_replicator.md +++ b/docs/cli/defradb_client_p2p_replicator.md @@ -1,4 +1,4 @@ -## defradb client rpc replicator +## defradb client p2p replicator Configure the replicator system @@ -16,7 +16,6 @@ A replicator replicates one or all collection(s) from one node to another. ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") @@ -24,13 +23,14 @@ A replicator replicates one or all collection(s) from one node to another. --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB node via RPC -* [defradb client rpc replicator delete](defradb_client_rpc_replicator_delete.md) - Delete a replicator. It will stop synchronizing -* [defradb client rpc replicator getall](defradb_client_rpc_replicator_getall.md) - Get all replicators -* [defradb client rpc replicator set](defradb_client_rpc_replicator_set.md) - Set a P2P replicator +* [defradb client p2p](defradb_client_p2p.md) - Interact with the DefraDB P2P system +* [defradb client p2p replicator delete](defradb_client_p2p_replicator_delete.md) - Delete replicator(s) and stop synchronization +* [defradb client p2p replicator getall](defradb_client_p2p_replicator_getall.md) - Get all replicators +* [defradb client p2p replicator set](defradb_client_p2p_replicator_set.md) - Add replicator(s) and start synchronization diff --git a/docs/cli/defradb_client_rpc_replicator_delete.md b/docs/cli/defradb_client_p2p_replicator_delete.md similarity index 58% rename from docs/cli/defradb_client_rpc_replicator_delete.md rename to docs/cli/defradb_client_p2p_replicator_delete.md index c851d2f508..fac7d4ce06 100644 --- a/docs/cli/defradb_client_rpc_replicator_delete.md +++ b/docs/cli/defradb_client_p2p_replicator_delete.md @@ -1,27 +1,30 @@ -## defradb client rpc replicator delete +## defradb client p2p replicator delete -Delete a replicator. It will stop synchronizing +Delete replicator(s) and stop synchronization ### Synopsis -Delete a replicator. It will stop synchronizing. +Delete replicator(s) and stop synchronization. +A replicator synchronizes one or all collection(s) from this node to another. + +Example: + defradb client p2p replicator delete -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}' + ``` -defradb client rpc replicator delete [-f, --full | -c, --collection] [flags] +defradb client p2p replicator delete [-c, --collection] [flags] ``` ### Options ``` - -c, --collection stringArray Define the collection for the replicator - -f, --full Set the replicator to act on all collections - -h, --help help for delete + -c, --collection strings Collection(s) to stop replicating + -h, --help help for delete ``` ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") @@ -29,10 +32,11 @@ defradb client rpc replicator delete [-f, --full | -c, --collection] [fla --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system +* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_replicator_getall.md b/docs/cli/defradb_client_p2p_replicator_getall.md similarity index 74% rename from docs/cli/defradb_client_rpc_replicator_getall.md rename to docs/cli/defradb_client_p2p_replicator_getall.md index 2449dba1fd..4c0c81f6d1 100644 --- a/docs/cli/defradb_client_rpc_replicator_getall.md +++ b/docs/cli/defradb_client_p2p_replicator_getall.md @@ -1,14 +1,18 @@ -## defradb client rpc replicator getall +## defradb client p2p replicator getall Get all replicators ### Synopsis Get all the replicators active in the P2P data sync system. -These are the replicators that are currently replicating data from one node to another. +A replicator synchronizes one or all collection(s) from this node to another. + +Example: + defradb client p2p replicator getall + ``` -defradb client rpc replicator getall [flags] +defradb client p2p replicator getall [flags] ``` ### Options @@ -20,7 +24,6 @@ defradb client rpc replicator getall [flags] ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -28,10 +31,11 @@ defradb client rpc replicator getall [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system +* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_rpc_replicator_set.md b/docs/cli/defradb_client_p2p_replicator_set.md similarity index 59% rename from docs/cli/defradb_client_rpc_replicator_set.md rename to docs/cli/defradb_client_p2p_replicator_set.md index 24b7add648..86f47fc30f 100644 --- a/docs/cli/defradb_client_rpc_replicator_set.md +++ b/docs/cli/defradb_client_p2p_replicator_set.md @@ -1,29 +1,30 @@ -## defradb client rpc replicator set +## defradb client p2p replicator set -Set a P2P replicator +Add replicator(s) and start synchronization ### Synopsis -Add a new target replicator. -A replicator replicates one or all collection(s) from this node to another. +Add replicator(s) and start synchronization. +A replicator synchronizes one or all collection(s) from this node to another. 
+ +Example: + defradb client p2p replicator set -c Users '{"ID": "12D3", "Addrs": ["/ip4/0.0.0.0/tcp/9171"]}' ``` -defradb client rpc replicator set [-f, --full | -c, --collection] [flags] +defradb client p2p replicator set [-c, --collection] [flags] ``` ### Options ``` - -c, --collection stringArray Define the collection for the replicator - -f, --full Set the replicator to act on all collections - -h, --help help for set + -c, --collection strings Collection(s) to replicate + -h, --help help for set ``` ### Options inherited from parent commands ``` - --addr string RPC endpoint address (default "0.0.0.0:9161") --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") @@ -31,10 +32,11 @@ defradb client rpc replicator set [-f, --full | -c, --collection] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client rpc replicator](defradb_client_rpc_replicator.md) - Configure the replicator system +* [defradb client p2p replicator](defradb_client_p2p_replicator.md) - Configure the replicator system diff --git a/docs/cli/defradb_client_query.md b/docs/cli/defradb_client_query.md index 8f5c3477c3..5e748229e2 100644 --- a/docs/cli/defradb_client_query.md +++ b/docs/cli/defradb_client_query.md @@ -41,6 +41,7 @@ defradb client query [query request] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_rpc_addreplicator.md b/docs/cli/defradb_client_rpc_addreplicator.md deleted file mode 100644 index e80b667f18..0000000000 --- a/docs/cli/defradb_client_rpc_addreplicator.md +++ /dev/null @@ -1,37 +0,0 @@ -## defradb client rpc addreplicator - -Add a new replicator - -### Synopsis - -Use this command if you wish to add a new target replicator -for the P2P data sync system. - -``` -defradb client rpc addreplicator [flags] -``` - -### Options - -``` - -h, --help help for addreplicator -``` - -### Options inherited from parent commands - -``` - --addr string gRPC endpoint address (default "0.0.0.0:9161") - --logformat string Log format to use. Options are csv, json (default "csv") - --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... - --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") - --lognocolor Disable colored log output - --logoutput string Log output path (default "stderr") - --logtrace Include stacktrace in error and fatal logs - --rootdir string Directory for data and configuration to use (default "$HOME/.defradb") - --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") -``` - -### SEE ALSO - -* [defradb client rpc](defradb_client_rpc.md) - Interact with a DefraDB gRPC server - diff --git a/docs/cli/defradb_client_schema.md b/docs/cli/defradb_client_schema.md index c36c8d4bce..ee3422741f 100644 --- a/docs/cli/defradb_client_schema.md +++ b/docs/cli/defradb_client_schema.md @@ -22,6 +22,7 @@ Make changes, updates, or look for existing schema types. --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` @@ -29,7 +30,8 @@ Make changes, updates, or look for existing schema types. * [defradb client](defradb_client.md) - Interact with a DefraDB node * [defradb client schema add](defradb_client_schema_add.md) - Add new schema -* [defradb client schema list](defradb_client_schema_list.md) - List schema types with their respective fields +* [defradb client schema describe](defradb_client_schema_describe.md) - View schema descriptions. * [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance * [defradb client schema patch](defradb_client_schema_patch.md) - Patch an existing schema type +* [defradb client schema set-default](defradb_client_schema_set-default.md) - Set the default schema version diff --git a/docs/cli/defradb_client_schema_add.md b/docs/cli/defradb_client_schema_add.md index b278431034..aa73039d0c 100644 --- a/docs/cli/defradb_client_schema_add.md +++ b/docs/cli/defradb_client_schema_add.md @@ -38,6 +38,7 @@ defradb client schema add [schema] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_schema_describe.md b/docs/cli/defradb_client_schema_describe.md new file mode 100644 index 0000000000..749ba24235 --- /dev/null +++ b/docs/cli/defradb_client_schema_describe.md @@ -0,0 +1,52 @@ +## defradb client schema describe + +View schema descriptions. + +### Synopsis + +Introspect schema types. + +Example: view all schemas + defradb client schema describe + +Example: view schemas by name + defradb client schema describe --name User + +Example: view schemas by root + defradb client schema describe --root bae123 + +Example: view a single schema by version id + defradb client schema describe --version bae123 + + +``` +defradb client schema describe [flags] +``` + +### Options + +``` + -h, --help help for describe + --name string Schema name + --root string Schema root + --version string Schema Version ID +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... 
+ --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node + diff --git a/docs/cli/defradb_client_schema_migration.md b/docs/cli/defradb_client_schema_migration.md index 0a20968378..c0910ad746 100644 --- a/docs/cli/defradb_client_schema_migration.md +++ b/docs/cli/defradb_client_schema_migration.md @@ -22,12 +22,16 @@ Make set or look for existing schema migrations on a DefraDB node. --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO * [defradb client schema](defradb_client_schema.md) - Interact with the schema system of a DefraDB node +* [defradb client schema migration down](defradb_client_schema_migration_down.md) - Reverses the migration from the specified schema version. * [defradb client schema migration get](defradb_client_schema_migration_get.md) - Gets the schema migrations within DefraDB +* [defradb client schema migration reload](defradb_client_schema_migration_reload.md) - Reload the schema migrations within DefraDB * [defradb client schema migration set](defradb_client_schema_migration_set.md) - Set a schema migration within DefraDB +* [defradb client schema migration up](defradb_client_schema_migration_up.md) - Applies the migration to the specified schema version. diff --git a/docs/cli/defradb_client_schema_migration_down.md b/docs/cli/defradb_client_schema_migration_down.md new file mode 100644 index 0000000000..afb631e267 --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_down.md @@ -0,0 +1,49 @@ +## defradb client schema migration down + +Reverses the migration from the specified schema version. + +### Synopsis + +Reverses the migration from the specified schema version. +Documents is a list of documents to reverse the migration from. + +Example: migrate from string + defradb client schema migration down --version bae123 '[{"name": "Bob"}]' + +Example: migrate from file + defradb client schema migration down --version bae123 -f documents.json + +Example: migrate from stdin + cat documents.json | defradb client schema migration down --version bae123 - + + +``` +defradb client schema migration down --version [flags] +``` + +### Options + +``` + -f, --file string File containing document(s) + -h, --help help for down + --version string Schema version id +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_migration_get.md b/docs/cli/defradb_client_schema_migration_get.md index d2164ed6bd..20ed8edb91 100644 --- a/docs/cli/defradb_client_schema_migration_get.md +++ b/docs/cli/defradb_client_schema_migration_get.md @@ -31,6 +31,7 @@ defradb client schema migration get [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_schema_migration_reload.md b/docs/cli/defradb_client_schema_migration_reload.md new file mode 100644 index 0000000000..f9acfd2d19 --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_reload.md @@ -0,0 +1,36 @@ +## defradb client schema migration reload + +Reload the schema migrations within DefraDB + +### Synopsis + +Reload the schema migrations within DefraDB + +``` +defradb client schema migration reload [flags] +``` + +### Options + +``` + -h, --help help for reload +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_migration_set.md b/docs/cli/defradb_client_schema_migration_set.md index 8013fd2a29..b9626bfeed 100644 --- a/docs/cli/defradb_client_schema_migration_set.md +++ b/docs/cli/defradb_client_schema_migration_set.md @@ -38,6 +38,7 @@ defradb client schema migration set [src] [dst] [cfg] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_schema_migration_up.md b/docs/cli/defradb_client_schema_migration_up.md new file mode 100644 index 0000000000..8f7688ccad --- /dev/null +++ b/docs/cli/defradb_client_schema_migration_up.md @@ -0,0 +1,49 @@ +## defradb client schema migration up + +Applies the migration to the specified schema version. + +### Synopsis + +Applies the migration to the specified schema version. +Documents is a list of documents to apply the migration to. + +Example: migrate from string + defradb client schema migration up --version bae123 '[{"name": "Bob"}]' + +Example: migrate from file + defradb client schema migration up --version bae123 -f documents.json + +Example: migrate from stdin + cat documents.json | defradb client schema migration up --version bae123 - + + +``` +defradb client schema migration up --version [flags] +``` + +### Options + +``` + -f, --file string File containing document(s) + -h, --help help for up + --version string Schema version id +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client schema migration](defradb_client_schema_migration.md) - Interact with the schema migration system of a running DefraDB instance + diff --git a/docs/cli/defradb_client_schema_patch.md b/docs/cli/defradb_client_schema_patch.md index ec64d293e0..357b43b2a8 100644 --- a/docs/cli/defradb_client_schema_patch.md +++ b/docs/cli/defradb_client_schema_patch.md @@ -28,6 +28,7 @@ defradb client schema patch [schema] [flags] ``` -f, --file string File to load a patch from -h, --help help for patch + --set-default Set default schema version ``` ### Options inherited from parent commands @@ -40,6 +41,7 @@ defradb client schema patch [schema] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_schema_list.md b/docs/cli/defradb_client_schema_set-default.md similarity index 78% rename from docs/cli/defradb_client_schema_list.md rename to docs/cli/defradb_client_schema_set-default.md index ffbe253e31..0698b0e6d5 100644 --- a/docs/cli/defradb_client_schema_list.md +++ b/docs/cli/defradb_client_schema_set-default.md @@ -1,15 +1,19 @@ -## defradb client schema list +## defradb client schema set-default -List schema types with their respective fields +Set the default schema version + +### Synopsis + +Set the default schema version ``` -defradb client schema list [flags] +defradb client schema set-default [versionID] [flags] ``` ### Options ``` - -h, --help help for list + -h, --help help for set-default ``` ### Options inherited from parent commands @@ -22,6 +26,7 @@ defradb client schema list [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` diff --git a/docs/cli/defradb_client_blocks.md b/docs/cli/defradb_client_tx.md similarity index 63% rename from docs/cli/defradb_client_blocks.md rename to docs/cli/defradb_client_tx.md index e05a853440..4feab4af7b 100644 --- a/docs/cli/defradb_client_blocks.md +++ b/docs/cli/defradb_client_tx.md @@ -1,11 +1,15 @@ -## defradb client blocks +## defradb client tx -Interact with the database's blockstore +Create, commit, and discard DefraDB transactions + +### Synopsis + +Create, commit, and discard DefraDB transactions ### Options ``` - -h, --help help for blocks + -h, --help help for tx ``` ### Options inherited from parent commands @@ -18,11 +22,14 @@ Interact with the database's blockstore --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or 
connect to (default "localhost:9181") ``` ### SEE ALSO * [defradb client](defradb_client.md) - Interact with a DefraDB node -* [defradb client blocks get](defradb_client_blocks_get.md) - Get a block by its CID from the blockstore +* [defradb client tx commit](defradb_client_tx_commit.md) - Commit a DefraDB transaction. +* [defradb client tx create](defradb_client_tx_create.md) - Create a new DefraDB transaction. +* [defradb client tx discard](defradb_client_tx_discard.md) - Discard a DefraDB transaction. diff --git a/docs/cli/defradb_client_blocks_get.md b/docs/cli/defradb_client_tx_commit.md similarity index 71% rename from docs/cli/defradb_client_blocks_get.md rename to docs/cli/defradb_client_tx_commit.md index 38ff02b63c..21f0b50325 100644 --- a/docs/cli/defradb_client_blocks_get.md +++ b/docs/cli/defradb_client_tx_commit.md @@ -1,15 +1,19 @@ -## defradb client blocks get +## defradb client tx commit -Get a block by its CID from the blockstore +Commit a DefraDB transaction. + +### Synopsis + +Commit a DefraDB transaction. ``` -defradb client blocks get [CID] [flags] +defradb client tx commit [id] [flags] ``` ### Options ``` - -h, --help help for get + -h, --help help for commit ``` ### Options inherited from parent commands @@ -22,10 +26,11 @@ defradb client blocks get [CID] [flags] --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client blocks](defradb_client_blocks.md) - Interact with the database's blockstore +* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions diff --git a/docs/cli/defradb_client_tx_create.md b/docs/cli/defradb_client_tx_create.md new file mode 100644 index 0000000000..8ba600b611 --- /dev/null +++ b/docs/cli/defradb_client_tx_create.md @@ -0,0 +1,38 @@ +## defradb client tx create + +Create a new DefraDB transaction. + +### Synopsis + +Create a new DefraDB transaction. + +``` +defradb client tx create [flags] +``` + +### Options + +``` + --concurrent Transaction is concurrent + -h, --help help for create + --read-only Transaction is read only +``` + +### Options inherited from parent commands + +``` + --logformat string Log format to use. Options are csv, json (default "csv") + --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... + --loglevel string Log level to use. 
Options are debug, info, error, fatal (default "info") + --lognocolor Disable colored log output + --logoutput string Log output path (default "stderr") + --logtrace Include stacktrace in error and fatal logs + --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID + --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") +``` + +### SEE ALSO + +* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions + diff --git a/docs/cli/defradb_client_ping.md b/docs/cli/defradb_client_tx_discard.md similarity index 71% rename from docs/cli/defradb_client_ping.md rename to docs/cli/defradb_client_tx_discard.md index 8edd7aff94..d1f0bb6025 100644 --- a/docs/cli/defradb_client_ping.md +++ b/docs/cli/defradb_client_tx_discard.md @@ -1,15 +1,19 @@ -## defradb client ping +## defradb client tx discard -Ping to test connection with a node +Discard a DefraDB transaction. + +### Synopsis + +Discard a DefraDB transaction. ``` -defradb client ping [flags] +defradb client tx discard [id] [flags] ``` ### Options ``` - -h, --help help for ping + -h, --help help for discard ``` ### Options inherited from parent commands ``` --logformat string Log format to use. Options are csv, json (default "csv") --logger stringArray Override logger parameters. Usage: --logger ,level=,output=,... --loglevel string Log level to use. Options are debug, info, error, fatal (default "info") --lognocolor Disable colored log output --logoutput string Log output path (default "stderr") --logtrace Include stacktrace in error and fatal logs --rootdir string Directory for data and configuration to use (default: $HOME/.defradb) + --tx uint Transaction ID --url string URL of HTTP endpoint to listen on or connect to (default "localhost:9181") ``` ### SEE ALSO -* [defradb client](defradb_client.md) - Interact with a DefraDB node +* [defradb client tx](defradb_client_tx.md) - Create, commit, and discard DefraDB transactions diff --git a/docs/cli/defradb_start.md b/docs/cli/defradb_start.md index d23b1fcacb..174700674b 100644 --- a/docs/cli/defradb_start.md +++ b/docs/cli/defradb_start.md @@ -23,7 +23,6 @@ defradb start [flags] --privkeypath string Path to the private key for tls (default "certs/server.crt") --pubkeypath string Path to the public key for tls (default "certs/server.key") --store string Specify the datastore to use (supported: badger, memory) (default "badger") - --tcpaddr string Listener address for the tcp gRPC server (formatted as a libp2p MultiAddr) (default "/ip4/0.0.0.0/tcp/9161") --tls Enable serving the API over https --valuelogfilesize ByteSize Specify the datastore value log file size (in bytes). In memory size will be 2*valuelogfilesize (default 1GiB) ``` diff --git a/docs/data_format_changes/i1083-rm-col-name-from-schema-id.md b/docs/data_format_changes/i1083-rm-col-name-from-schema-id.md new file mode 100644 index 0000000000..8ed76b8f27 --- /dev/null +++ b/docs/data_format_changes/i1083-rm-col-name-from-schema-id.md @@ -0,0 +1,3 @@ +# Remove collection name from schema ID generation + +The collection name was removed from the schema ID generation; this caused test schema IDs and commit CIDs to change. It will also impact production systems, as identical schemas created on different Defra versions would not have the same IDs. \ No newline at end of file diff --git a/docs/data_format_changes/i1436-no-change-tests-updated.md b/docs/data_format_changes/i1436-no-change-tests-updated.md new file mode 100644 index 0000000000..89f7305133 --- /dev/null +++ b/docs/data_format_changes/i1436-no-change-tests-updated.md @@ -0,0 +1,3 @@ +# Parallel change detector + +This is not a breaking change.
The change detector has been updated to allow for parallel test runs. There were changes to environment variables and test setup that make the previous version of the change detector incompatible with this version. diff --git a/docs/data_format_changes/i1958-remove-col-description-schema.md b/docs/data_format_changes/i1958-remove-col-description-schema.md new file mode 100644 index 0000000000..3eadccae71 --- /dev/null +++ b/docs/data_format_changes/i1958-remove-col-description-schema.md @@ -0,0 +1,3 @@ +# Remove CollectionDescription.Schema + +The way schemas are stored has changed. Previously they were stored within a collection description; this PR splits them out into a new independent key. diff --git a/docs/data_format_changes/i1964-reorg-col-desc-storage.md b/docs/data_format_changes/i1964-reorg-col-desc-storage.md new file mode 100644 index 0000000000..6e88006230 --- /dev/null +++ b/docs/data_format_changes/i1964-reorg-col-desc-storage.md @@ -0,0 +1,3 @@ +# Reorganise collection description storage + +The way collection descriptions are stored and indexed in Defra has changed; please refer to https://github.com/sourcenetwork/defradb/pull/1988 and https://github.com/sourcenetwork/defradb/issues/1964 for more info. diff --git a/docs/data_format_changes/i2004-rename-schema-root.md b/docs/data_format_changes/i2004-rename-schema-root.md new file mode 100644 index 0000000000..4e0d95b7b0 --- /dev/null +++ b/docs/data_format_changes/i2004-rename-schema-root.md @@ -0,0 +1,3 @@ +# Rename Schema.SchemaID to Schema.Root + +Schema.SchemaID has been renamed to Schema.Root. Because this property is used to generate the schema version ID, which in turn forms part of the commit CIDs, the schema CIDs and commit CIDs have changed. diff --git a/errors/defraError.go b/errors/defraError.go index 2281add30e..2f05f1131d 100644 --- a/errors/defraError.go +++ b/errors/defraError.go @@ -58,13 +58,12 @@ func (e *defraError) Error() string { } func (e *defraError) Is(other error) bool { - switch otherTyped := other.(type) { - case *defraError: - return e.message == otherTyped.message - default: - otherString := other.Error() - return e.message == otherString || e.Error() == otherString || errors.Is(e.inner, other) + var otherDefraError *defraError + if errors.As(other, &otherDefraError) { + return e.message == otherDefraError.message } + otherString := other.Error() + return e.message == otherString || e.Error() == otherString || errors.Is(e.inner, other) } func (e *defraError) Unwrap() error { diff --git a/events/db_update.go b/events/db_update.go index da62a81e7e..d9479656a3 100644 --- a/events/db_update.go +++ b/events/db_update.go @@ -25,9 +25,9 @@ var EmptyUpdateChannel = immutable.None[Channel[Update]]() // UpdateEvent represents a new DAG node added to the append-only MerkleCRDT Clock graph // of a document or sub-field.
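The defraError.Is change above replaces a concrete type switch with errors.As, so a *defraError now matches even when the other error arrives wrapped (for example via fmt.Errorf with %w); the string-comparison fallback is kept as before. A self-contained sketch of the pattern, with illustrative names rather than DefraDB's actual types:

```go
package main

import (
	"errors"
	"fmt"
)

// demoError stands in for defraError: a message plus an optional
// wrapped inner error. All names here are illustrative.
type demoError struct {
	message string
	inner   error
}

func (e *demoError) Error() string {
	if e.inner != nil {
		return e.message + ": " + e.inner.Error()
	}
	return e.message
}

func (e *demoError) Unwrap() error { return e.inner }

// Is follows the new approach: errors.As walks other's wrap chain
// looking for a *demoError before falling back to string comparison.
func (e *demoError) Is(other error) bool {
	var otherDemo *demoError
	if errors.As(other, &otherDemo) {
		return e.message == otherDemo.message
	}
	otherString := other.Error()
	return e.message == otherString || e.Error() == otherString || errors.Is(e.inner, other)
}

func main() {
	base := &demoError{message: "not found"}
	wrapped := fmt.Errorf("lookup failed: %w", &demoError{message: "not found"})

	// errors.Is(base, wrapped) invokes base.Is(wrapped). A plain type
	// switch would see only the fmt wrapper and miss the match;
	// errors.As unwraps it and compares messages.
	fmt.Println(errors.Is(base, wrapped)) // true
}
```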
type Update struct { - DocKey string - Cid cid.Cid - SchemaID string - Block ipld.Node - Priority uint64 + DocKey string + Cid cid.Cid + SchemaRoot string + Block ipld.Node + Priority uint64 } diff --git a/go.mod b/go.mod index 4af3d59cb4..dfe905f39e 100644 --- a/go.mod +++ b/go.mod @@ -3,19 +3,17 @@ module github.com/sourcenetwork/defradb go 1.20 require ( - github.com/bits-and-blooms/bitset v1.8.0 + github.com/bits-and-blooms/bitset v1.11.0 github.com/bxcodec/faker v2.0.1+incompatible - github.com/dgraph-io/badger/v4 v4.1.0 - github.com/evanphx/json-patch/v5 v5.6.0 + github.com/evanphx/json-patch/v5 v5.7.0 github.com/fxamacker/cbor/v2 v2.5.0 + github.com/getkin/kin-openapi v0.120.0 github.com/go-chi/chi/v5 v5.0.10 github.com/go-chi/cors v1.2.1 - github.com/go-errors/errors v1.5.0 + github.com/go-errors/errors v1.5.1 github.com/gofrs/uuid/v5 v5.0.0 - github.com/graphql-go/graphql v0.8.1 - github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 github.com/iancoleman/strcase v0.3.0 - github.com/ipfs/boxo v0.12.0 + github.com/ipfs/boxo v0.15.0 github.com/ipfs/go-block-format v0.2.0 github.com/ipfs/go-cid v0.4.1 github.com/ipfs/go-datastore v0.6.0 @@ -24,186 +22,172 @@ require ( github.com/ipfs/go-log/v2 v2.5.1 github.com/jbenet/goprocess v0.1.4 github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25 - github.com/libp2p/go-libp2p v0.29.2 + github.com/libp2p/go-libp2p v0.32.1 github.com/libp2p/go-libp2p-gostream v0.6.0 - github.com/libp2p/go-libp2p-kad-dht v0.23.0 - github.com/libp2p/go-libp2p-pubsub v0.9.3 + github.com/libp2p/go-libp2p-kad-dht v0.25.1 + github.com/libp2p/go-libp2p-pubsub v0.10.0 github.com/libp2p/go-libp2p-record v0.2.0 github.com/mitchellh/mapstructure v1.5.0 - github.com/multiformats/go-multiaddr v0.10.1 + github.com/multiformats/go-multiaddr v0.12.0 github.com/multiformats/go-multibase v0.2.0 github.com/multiformats/go-multihash v0.2.3 - github.com/pkg/errors v0.9.1 - github.com/planetscale/vtprotobuf v0.5.0 + github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 + github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 + github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd github.com/sourcenetwork/immutable v0.3.0 - github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/spf13/pflag v1.0.5 - github.com/spf13/viper v1.16.0 + github.com/spf13/viper v1.17.0 github.com/stretchr/testify v1.8.4 - github.com/textileio/go-libp2p-pubsub-rpc v0.0.9 - github.com/tidwall/btree v1.6.0 + github.com/tidwall/btree v1.7.0 github.com/ugorji/go/codec v1.2.11 github.com/valyala/fastjson v1.6.4 github.com/vito/go-sse v1.0.0 - go.opentelemetry.io/otel/metric v1.18.0 - go.opentelemetry.io/otel/sdk/metric v0.40.0 - go.uber.org/zap v1.25.0 - golang.org/x/crypto v0.13.0 - golang.org/x/net v0.14.0 - google.golang.org/grpc v1.58.0 + go.opentelemetry.io/otel/metric v1.19.0 + go.opentelemetry.io/otel/sdk/metric v1.19.0 + go.uber.org/zap v1.26.0 + golang.org/x/crypto v0.15.0 + golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa + golang.org/x/net v0.18.0 + google.golang.org/grpc v1.59.0 google.golang.org/protobuf v1.31.0 ) require ( + github.com/Jorropo/jsync v1.0.1 // indirect github.com/benbjohnson/clock v1.3.5 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/containerd/cgroups v1.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/cskr/pubsub 
v1.0.2 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect github.com/dgraph-io/ristretto v0.1.1 // indirect github.com/docker/go-units v0.5.0 // indirect - github.com/dustin/go-humanize v1.0.0 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect github.com/elastic/gosigar v0.14.2 // indirect github.com/flynn/noise v1.0.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect github.com/fsnotify/fsnotify v1.6.0 // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/swag v0.22.4 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/godbus/dbus/v5 v5.1.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/glog v1.1.0 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/mock v1.6.0 // indirect github.com/golang/protobuf v1.5.3 // indirect - github.com/golang/snappy v0.0.4 // indirect github.com/google/flatbuffers v2.0.6+incompatible // indirect github.com/google/gopacket v1.1.19 // indirect - github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 // indirect - github.com/google/uuid v1.3.0 // indirect + github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b // indirect + github.com/google/uuid v1.3.1 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d // indirect - github.com/hashicorp/golang-lru/v2 v2.0.2 // indirect + github.com/hashicorp/golang-lru/arc/v2 v2.0.5 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hashicorp/hcl v1.0.0 // indirect - github.com/hsanjuan/ipfs-lite v1.4.1 // indirect - github.com/huin/goupnp v1.2.0 // indirect + github.com/huin/goupnp v1.3.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/yaml v0.2.0 // indirect github.com/ipfs/bbloom v0.0.4 // indirect - github.com/ipfs/go-bitswap v0.12.0 // indirect - github.com/ipfs/go-blockservice v0.5.1 // indirect - github.com/ipfs/go-fetcher v1.6.1 // indirect - github.com/ipfs/go-ipfs-blockstore v1.2.0 // indirect - github.com/ipfs/go-ipfs-config v0.19.0 // indirect github.com/ipfs/go-ipfs-delay v0.0.1 // indirect - github.com/ipfs/go-ipfs-exchange-interface v0.2.0 // indirect - github.com/ipfs/go-ipfs-exchange-offline v0.3.0 // indirect - github.com/ipfs/go-ipfs-files v0.3.0 // indirect github.com/ipfs/go-ipfs-pq v0.0.3 // indirect - github.com/ipfs/go-ipfs-util v0.0.2 // indirect - github.com/ipfs/go-ipld-cbor v0.0.6 // indirect + github.com/ipfs/go-ipfs-util v0.0.3 // indirect github.com/ipfs/go-ipld-legacy v0.2.1 // indirect - github.com/ipfs/go-ipns v0.3.0 // indirect - github.com/ipfs/go-libipfs v0.7.0 // indirect - github.com/ipfs/go-merkledag v0.9.0 // indirect github.com/ipfs/go-metrics-interface v0.0.1 // indirect github.com/ipfs/go-peertaskqueue v0.8.1 // indirect - github.com/ipfs/interface-go-ipfs-core v0.10.0 // indirect github.com/ipld/go-codec-dagpb v1.6.0 // indirect github.com/ipld/go-ipld-prime v0.21.0 // indirect 
github.com/jackpal/go-nat-pmp v1.0.2 // indirect github.com/jbenet/go-temp-err-catcher v0.1.0 // indirect - github.com/klauspost/compress v1.16.7 // indirect - github.com/klauspost/cpuid/v2 v2.2.5 // indirect + github.com/josharian/intern v1.0.0 // indirect + github.com/klauspost/compress v1.17.2 // indirect + github.com/klauspost/cpuid/v2 v2.2.6 // indirect github.com/koron/go-ssdp v0.0.4 // indirect github.com/libp2p/go-buffer-pool v0.1.0 // indirect github.com/libp2p/go-cidranger v1.1.0 // indirect github.com/libp2p/go-flow-metrics v0.1.0 // indirect github.com/libp2p/go-libp2p-asn-util v0.3.0 // indirect - github.com/libp2p/go-libp2p-connmgr v0.4.0 // indirect - github.com/libp2p/go-libp2p-core v0.20.0 // indirect - github.com/libp2p/go-libp2p-kbucket v0.6.0 // indirect - github.com/libp2p/go-libp2p-routing-helpers v0.7.0 // indirect + github.com/libp2p/go-libp2p-kbucket v0.6.3 // indirect + github.com/libp2p/go-libp2p-routing-helpers v0.7.3 // indirect github.com/libp2p/go-msgio v0.3.0 // indirect github.com/libp2p/go-nat v0.2.0 // indirect github.com/libp2p/go-netroute v0.2.1 // indirect - github.com/libp2p/go-reuseport v0.3.0 // indirect + github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect github.com/magiconair/properties v1.8.7 // indirect + github.com/mailru/easyjson v0.7.7 // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect - github.com/mattn/go-isatty v0.0.19 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect - github.com/miekg/dns v1.1.55 // indirect + github.com/miekg/dns v1.1.56 // indirect github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b // indirect github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc // indirect github.com/minio/sha256-simd v1.0.1 // indirect + github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base32 v0.1.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-dns v0.3.1 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect github.com/multiformats/go-multicodec v0.9.0 // indirect - github.com/multiformats/go-multistream v0.4.1 // indirect + github.com/multiformats/go-multistream v0.5.0 // indirect github.com/multiformats/go-varint v0.0.7 // indirect - github.com/onsi/ginkgo/v2 v2.11.0 // indirect - github.com/opencontainers/runtime-spec v1.0.2 // indirect + github.com/onsi/ginkgo v1.16.5 // indirect + github.com/onsi/ginkgo/v2 v2.13.0 // indirect + github.com/opencontainers/runtime-spec v1.1.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect - github.com/pelletier/go-toml/v2 v2.0.8 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pelletier/go-toml/v2 v2.1.0 // indirect + github.com/perimeterx/marshmallow v1.1.5 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/polydawn/refmt v0.89.0 // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.4.0 // indirect - github.com/prometheus/common v0.42.0 // indirect - github.com/prometheus/procfs v0.9.0 // indirect + github.com/prometheus/client_golang v1.17.0 // indirect + github.com/prometheus/client_model v0.5.0 // 
indirect + github.com/prometheus/common v0.44.0 // indirect + github.com/prometheus/procfs v0.12.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-19 v0.3.3 // indirect - github.com/quic-go/qtls-go1-20 v0.2.3 // indirect - github.com/quic-go/quic-go v0.36.4 // indirect - github.com/quic-go/webtransport-go v0.5.3 // indirect + github.com/quic-go/qtls-go1-20 v0.3.4 // indirect + github.com/quic-go/quic-go v0.39.3 // indirect + github.com/quic-go/webtransport-go v0.6.0 // indirect github.com/raulk/go-watchdog v1.3.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/sagikazarmark/locafero v0.3.0 // indirect + github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/sourcegraph/conc v0.3.0 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect - github.com/spf13/afero v1.9.5 // indirect + github.com/spf13/afero v1.10.0 // indirect github.com/spf13/cast v1.5.1 // indirect - github.com/spf13/jwalterweatherman v1.1.0 // indirect github.com/stretchr/objx v0.5.0 // indirect - github.com/subosito/gotenv v1.4.2 // indirect + github.com/subosito/gotenv v1.6.0 // indirect github.com/tetratelabs/wazero v1.3.1 // indirect github.com/textileio/go-log/v2 v2.1.3-gke-2 // indirect - github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa // indirect github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 // indirect github.com/x448/float16 v0.8.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/otel v1.18.0 // indirect - go.opentelemetry.io/otel/sdk v1.17.0 // indirect - go.opentelemetry.io/otel/trace v1.18.0 // indirect - go.uber.org/dig v1.17.0 // indirect - go.uber.org/fx v1.20.0 // indirect + go.opentelemetry.io/otel v1.19.0 // indirect + go.opentelemetry.io/otel/sdk v1.19.0 // indirect + go.opentelemetry.io/otel/trace v1.19.0 // indirect + go.uber.org/dig v1.17.1 // indirect + go.uber.org/fx v1.20.1 // indirect + go.uber.org/mock v0.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect - golang.org/x/mod v0.12.0 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.12.0 // indirect - golang.org/x/text v0.13.0 // indirect - golang.org/x/tools v0.11.0 // indirect - golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect + golang.org/x/mod v0.14.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/sys v0.14.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.15.0 // indirect gonum.org/v1/gonum v0.13.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect gopkg.in/ini.v1 v1.67.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect lukechampine.com/blake3 v1.2.1 // indirect ) - -// SourceNetwork fork og graphql-go -replace ( - github.com/dgraph-io/badger/v4 => github.com/sourcenetwork/badger/v4 v4.0.0-20230801145501-d3a57bd4c2ec - github.com/graphql-go/graphql => github.com/sourcenetwork/graphql-go v0.7.10-0.20230511091704-fe7085512c23 - github.com/textileio/go-libp2p-pubsub-rpc => github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.0-20230209220544-e16d5e34c4fc -) diff --git a/go.sum b/go.sum index 9cc9fad3fd..cfc064271c 100644 --- a/go.sum +++ b/go.sum @@ -43,171 +43,92 @@ dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBr dmitri.shuralyov.com/service/change 
v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4= dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= -github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Kubuxu/go-os-helper v0.0.1/go.mod h1:N8B+I7vPCT80IcP58r50u4+gEEcsZETFUpAzWW2ep1Y= -github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/Stebalien/go-bitfield v0.0.1/go.mod h1:GNjFpasyUVkHMsfEOk8EFLJ9syQ6SI+XWrX9Wf2XH0s= -github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= -github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII= -github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/Jorropo/jsync v1.0.1 h1:6HgRolFZnsdfzRUj+ImB9og1JYOxQoReSywkHOGSaUU= +github.com/Jorropo/jsync v1.0.1/go.mod h1:jCOZj3vrBCri3bSU3ErUYvevKlnbssrXeCivybS5ABQ= github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/aryann/difflib 
v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o= github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= -github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/bits-and-blooms/bitset v1.8.0 h1:FD+XqgOZDUxxZ8hzoBFuV9+cGWY9CslN6d5MS5JVb4c= -github.com/bits-and-blooms/bitset v1.8.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= +github.com/bits-and-blooms/bitset v1.11.0 h1:RMyy2mBBShArUAhfVRZJ2xyBO58KCBCtZFShw3umo6k= +github.com/bits-and-blooms/bitset v1.11.0/go.mod h1:7hO7Gc7Pp1vODcmWvKMRA9BNmbv6a/7QIWpPxHddWR8= github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g= -github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dmnfpw9KyYoQyYo7d0KEvTkk/5M/vbZjAr8= -github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190605094302-a0d1e3e36d50/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= -github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= -github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= -github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= -github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= -github.com/btcsuite/goleveldb v1.0.0/go.mod h1:QiK9vBlgftBg6rWQIj6wFzbPfRjiykIEhBH4obrXJ/I= -github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/snappy-go v1.0.0/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= -github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY= -github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs= 
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s= github.com/bxcodec/faker v2.0.1+incompatible h1:P0KUpUw5w6WJXwrPfv35oc91i4d8nf40Nwln+M/+faA= github.com/bxcodec/faker v2.0.1+incompatible/go.mod h1:BNzfpVdTwnFJ6GtfYTcQu6l6rHShT+veBxNCnjCx5XM= -github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= -github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= -github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/cilium/ebpf v0.2.0/go.mod h1:To2CFviqOWL/M0gIMsvSMlqe7em/l1ALkX1PyjrX2Qs= -github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/cgroups v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= -github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.2.1-0.20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod 
h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= -github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3 h1:HVTnpeuvF6Owjd5mniCL8DEXo7uYXdQEmOP4FJbV5tg= -github.com/crackcomm/go-gitignore v0.0.0-20170627025303-887ab5e44cc3/go.mod h1:p1d6YEZWvFzEh4KLyvBcVSnrfNDDvK2zfK/4x2v/4pE= -github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cskr/pubsub v1.0.2 h1:vlOzMhl6PFn60gRlTQQsIfVwaPB/B/8MziK8FhEPt/0= github.com/cskr/pubsub v1.0.2/go.mod h1:/8MzYXk/NJAz782G8RPkFzXTZVu63VotefPnR9TIRis= -github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018/go.mod h1:rQYf4tfk5sSwFsnDg3qYaBxSjsD9S8+59vW0dKUgme4= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c h1:pFUpOrbxDR6AkioZ1ySsx5yxlDQZ8stG2b88gTPxgJU= github.com/davidlazar/go-crypto v0.0.0-20200604182044-b73af7476f6c/go.mod h1:6UhI8N9EjYm1c2odKpFpAYeR8dsBeM7PtzQhRgxRr9U= github.com/decred/dcrd/crypto/blake256 v1.0.1 h1:7PltbUIQB7u/FfZ39+DGa/ShuMyJ5ilcvdfma9wOH6Y= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 h1:8UrgZ3GkP4i/CLijOJx79Yu+etlyjdBU4sfcs2WYQMs= github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0/go.mod h1:v57UDF4pDQJcEfFUCRop3lJL149eHGSe9Jvczhzjo/0= -github.com/decred/dcrd/lru v1.0.0/go.mod h1:mxKOwFd7lFjN2GZYsiz/ecgqR6kkYAl+0pz0tEMk218= -github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzAIRPHRVNRKRo6AXrX9BJegn6il06VMTZVJYCIjQ= -github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= -github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger/v3 v3.2011.1 h1:Hmyof0WMEF/QtutX5SQHzIMnJQxb/IrSzhjckV2SD6g= 
-github.com/dgraph-io/ristretto v0.0.2/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.1.1 h1:6CWw5tJNgpegArSHpNHJKldNeq03FQCwYvfMVWajOK8= github.com/dgraph-io/ristretto v0.1.1/go.mod h1:S1GPSBCYCIhmVNfcth17y2zZtQT6wzkzgwUve0VDWWA= -github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= -github.com/dgryski/go-farm v0.0.0-20190104051053-3adb47b1fb0f/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= github.com/elastic/gosigar v0.12.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= -github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= -github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= -github.com/facebookgo/atomicfile v0.0.0-20151019160806-2de1f203e7d5/go.mod h1:JpoxHjuQauoxiFMl1ie8Xc/7TfLuMZ5eOCONd1sUBHg= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/evanphx/json-patch/v5 v5.7.0 h1:nJqP7uwL84RJInrohHfW0Fx3awjbm8qZeFv0nW9SYGc= +github.com/evanphx/json-patch/v5 v5.7.0/go.mod h1:VNkHZ/282BpEyt/tObQO8s5CMPmYYq14uClGH4abBuQ= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod 
h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= github.com/flynn/noise v1.0.0 h1:DlTHqmzmvcEiKj+4RYo/imoswx/4r6iBlCMfVtrMXpQ= github.com/flynn/noise v1.0.0/go.mod h1:xbMo+0i6+IGbYdJhF31t2eR1BIU0CYc12+BNAKwUTag= github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk= github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY= -github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= -github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= @@ -215,35 +136,34 @@ github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4 github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/getkin/kin-openapi v0.120.0 h1:MqJcNJFrMDFNc07iwE8iFC5eT2k/NPUFDIpNeiZv8Jg= +github.com/getkin/kin-openapi v0.120.0/go.mod h1:PCWw/lfBrJY4HcdqE3jj+QFkaFK8ABoqo7PvqVhXXqw= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-check/check v0.0.0-20180628173108-788fd7840127/go.mod h1:9ES+weclKsC9YodN5RgxqK/VD9HM9JsCSh7rNhMZE98= github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4= github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= -github.com/go-errors/errors v1.5.0 h1:/EuijeGOu7ckFxzhkj4CXJ8JaenxK7bKUxpPYqeLHqQ= -github.com/go-errors/errors v1.5.0/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= -github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= -github.com/go-logfmt/logfmt v0.5.0/go.mod 
h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= +github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-test/deep v1.0.8 h1:TDsG77qcSprGbC6vTN8OuXp5g+J+b5Pcguhf7Zt61VM= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -251,20 +171,14 @@ github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk= github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid/v5 v5.0.0 h1:p544++a97kEL+svbcFbCQVM9KFu0Yo25UoISXGNNH9M= github.com/gofrs/uuid/v5 v5.0.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= -github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= 
-github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= @@ -277,10 +191,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= @@ -297,9 +208,7 @@ github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= -github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/flatbuffers v2.0.6+incompatible h1:XHFReMv7nFFusa+CEokzWbzaYocKXI6C7hdU5Kgh9Lw= @@ -318,8 +227,6 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gopacket v1.1.17/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= @@ -335,594 +242,178 @@ github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hf github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8 h1:n6vlPhxsA+BW/XsS5+uqi7GyzaLa5MH7qlSLBZtRdiA= -github.com/google/pprof v0.0.0-20230705174524-200ffdc848b8/go.mod h1:Jh3hGz2jkYak8qXPD19ryItVnUgpgeqzdkY/D0EaeuA= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b h1:RMpPgZTSApbPf7xaVel+QkoGPRLFLrwFO89uDUHEGf0= +github.com/google/pprof v0.0.0-20231023181126-ff6d637d2a7b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190430165422-3e4dfb77656c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190812055157-5d271430af9f h1:KMlcu9X58lhTA/KrfX8Bi1LQSO4pzoVjTiL3h4Jk+Zk= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= -github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/go-grpc-prometheus 
v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= -github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= -github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= -github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= -github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= -github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs= github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/golang-lru/v2 v2.0.2 h1:Dwmkdr5Nc/oBiXgJS3CDHNhJtIHkuZ3DZF5twqnfBdU= -github.com/hashicorp/golang-lru/v2 v2.0.2/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5 h1:l2zaLDubNhW4XO3LnliVj0GXO3+/CGNJAg1dcN2Fpfw= +github.com/hashicorp/golang-lru/arc/v2 v2.0.5/go.mod h1:ny6zBSQZi2JxIeYcv7kt2sH2PXJtirBN7RDhRpxPkxU= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/mdns v1.0.0/go.mod 
h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= -github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= -github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/hsanjuan/ipfs-lite v1.4.1 h1:l+mnqk6wm2GiVJWn4u0UBtX+YqqA5cfsjX1ZujPxzx0= -github.com/hsanjuan/ipfs-lite v1.4.1/go.mod h1:+c/L+PWf0l7DhmQF3cO2O3GBRQT/pUZrl86VG//O9Hk= -github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= -github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= -github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY= -github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= -github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= +github.com/hsanjuan/ipfs-lite v1.8.1 h1:Rpd9bTXYgkmnt8M5QsZnWwtW6ebxAB7HlU/d0zE4BmA= +github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= +github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/influxdata/influxdb1-client v0.0.0-20191209144304-8bf82d3c094d/go.mod h1:qj24IKcXYK6Iy9ceXlo3Tc+vtHo9lIhSX5JddghvEPo= -github.com/ipfs/bbloom v0.0.1/go.mod h1:oqo8CVWsJFMOZqTglBG4wydCE4IQA/G2/SEofB0rjUI= +github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= +github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= github.com/ipfs/bbloom v0.0.4 h1:Gi+8EGJ2y5qiD5FbsbpX/TMNcJw8gSqr7eyjHa4Fhvs= github.com/ipfs/bbloom v0.0.4/go.mod h1:cS9YprKXpoZ9lT0n/Mw/a6/aFV6DTjTLYHeA+gyqMG0= -github.com/ipfs/boxo v0.12.0 h1:AXHg/1ONZdRQHQLgG5JHsSC3XoE4DjCAMgK+asZvUcQ= -github.com/ipfs/boxo v0.12.0/go.mod h1:xAnfiU6PtxWCnRqu7dcXQ10bB5/kvI1kXRotuGqGBhg= +github.com/ipfs/boxo v0.15.0 h1:BriLydj2nlK1nKeJQHxcKSuG5ZXcoutzhBklOtxC5pk= +github.com/ipfs/boxo v0.15.0/go.mod h1:X5ulcbR5Nh7sm3Db8+08AApUo6FsGC5mb23QDKAoB/M= github.com/ipfs/go-bitfield v1.1.0 h1:fh7FIo8bSwaJEh6DdTWbCeZ1eqOaOkKFI74SCnsWbGA= -github.com/ipfs/go-bitswap v0.0.9/go.mod h1:kAPf5qgn2W2DrgAcscZ3HrM9qh4pH+X8Fkk3UPrwvis= -github.com/ipfs/go-bitswap v0.1.0/go.mod h1:FFJEf18E9izuCqUtHxbWEvq+reg7o4CW5wSAE1wsxj0= -github.com/ipfs/go-bitswap v0.1.2/go.mod h1:qxSWS4NXGs7jQ6zQvoPY3+NmOfHHG47mhkiLzBpJQIs= -github.com/ipfs/go-bitswap v0.5.1/go.mod h1:P+ckC87ri1xFLvk74NlXdP0Kj9RmWAh4+H78sC6Qopo= -github.com/ipfs/go-bitswap v0.12.0 h1:ClbLaufwv8SRQK0sBhl4wDVqJoZGAGMVxdjQy5CTt6c= -github.com/ipfs/go-bitswap v0.12.0/go.mod h1:Iwjkd6+vaDjVIa6b6ogmZgs+b5U3EkIFEX79kQ4DjnI= -github.com/ipfs/go-block-format v0.0.1/go.mod h1:DK/YYcsSUIVAFNwo/KZCdIIbpN0ROH/baNLgayt4pFc= -github.com/ipfs/go-block-format v0.0.2/go.mod 
h1:AWR46JfpcObNfg3ok2JHDUfdiHRgWhJgCQF+KIgOPJY= -github.com/ipfs/go-block-format v0.0.3/go.mod h1:4LmD4ZUw0mhO+JSKdpWwrzATiEfM7WWgQ8H5l6P8MVk= github.com/ipfs/go-block-format v0.2.0 h1:ZqrkxBA2ICbDRbK8KJs/u0O3dlp6gmAuuXUJNiW1Ycs= github.com/ipfs/go-block-format v0.2.0/go.mod h1:+jpL11nFx5A/SPpsoBn6Bzkra/zaArfSmsknbPMYgzM= -github.com/ipfs/go-blockservice v0.0.7/go.mod h1:EOfb9k/Y878ZTRY/CH0x5+ATtaipfbRhbvNSdgc/7So= -github.com/ipfs/go-blockservice v0.1.0/go.mod h1:hzmMScl1kXHg3M2BjTymbVPjv627N7sYcvYaKbop39M= -github.com/ipfs/go-blockservice v0.2.1/go.mod h1:k6SiwmgyYgs4M/qt+ww6amPeUH9EISLRBnvUurKJhi8= -github.com/ipfs/go-blockservice v0.5.1 h1:9pAtkyKAz/skdHTh0kH8VulzWp+qmSDD0aI17TYP/s0= -github.com/ipfs/go-blockservice v0.5.1/go.mod h1:VpMblFEqG67A/H2sHKAemeH9vlURVavlysbdUI632yk= -github.com/ipfs/go-cid v0.0.1/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.2/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.3/go.mod h1:GHWU/WuQdMPmIosc4Yn1bcCT7dSeX4lBafM7iqUPQvM= -github.com/ipfs/go-cid v0.0.4/go.mod h1:4LLaPOQwmk5z9LBgQnpkivrx8BJjUyGwTXCd5Xfj6+M= -github.com/ipfs/go-cid v0.0.5/go.mod h1:plgt+Y5MnOey4vO4UlUazGqdbEXuFYitED67FexhXog= -github.com/ipfs/go-cid v0.0.6/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= -github.com/ipfs/go-cid v0.0.7/go.mod h1:6Ux9z5e+HpkQdckYoX1PG/6xqKspzlEIR5SDmgqgC/I= github.com/ipfs/go-cid v0.4.1 h1:A/T3qGvxi4kpKWWcPC/PgbvDA2bjVLO7n4UeVwnbs/s= github.com/ipfs/go-cid v0.4.1/go.mod h1:uQHwDeX4c6CtyrFwdqyhpNcxVewur1M7l7fNU7LKwZk= github.com/ipfs/go-cidutil v0.1.0 h1:RW5hO7Vcf16dplUU60Hs0AKDkQAVPVplr7lk97CFL+Q= -github.com/ipfs/go-datastore v0.0.1/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.0.5/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.0/go.mod h1:d4KVXhMt913cLBEI/PXAy6ko+W7e9AhyAKBGh803qeE= -github.com/ipfs/go-datastore v0.1.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.3.1/go.mod h1:w38XXW9kVFNp57Zj5knbKWM2T+KOZCGDRVNdgPHtbHw= -github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= -github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= -github.com/ipfs/go-datastore v0.5.0/go.mod h1:9zhEApYMTl17C8YDp7JmU7sQZi2/wqiYh73hakZ90Bk= github.com/ipfs/go-datastore v0.6.0 h1:JKyz+Gvz1QEZw0LsX1IBn+JFCJQH4SJVFtM4uWU0Myk= github.com/ipfs/go-datastore v0.6.0/go.mod h1:rt5M3nNbSO/8q1t4LNkLyUwRs8HupMeN/8O4Vn9YAT8= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= -github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= -github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= -github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= -github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= github.com/ipfs/go-ds-badger v0.3.0 h1:xREL3V0EH9S219kFFueOYJJTcjgNSZ2HY1iSvN7U1Ro= -github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= -github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= 
-github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.5.0 h1:s++MEBbD3ZKc9/8/njrn4flZLnCuY9I79v94gBUNumo= -github.com/ipfs/go-fetcher v1.6.1 h1:UFuRVYX5AIllTiRhi5uK/iZkfhSpBCGX7L70nSZEmK8= -github.com/ipfs/go-fetcher v1.6.1/go.mod h1:27d/xMV8bodjVs9pugh/RCjjK2OZ68UgAMspMdingNo= -github.com/ipfs/go-ipfs-blockstore v0.0.1/go.mod h1:d3WClOmRQKFnJ0Jz/jj/zmksX0ma1gROTlovZKBmN08= -github.com/ipfs/go-ipfs-blockstore v0.1.0/go.mod h1:5aD0AvHPi7mZc6Ci1WCAhiBQu2IsfTduLl+422H6Rqw= -github.com/ipfs/go-ipfs-blockstore v0.2.1/go.mod h1:jGesd8EtCM3/zPgx+qr0/feTXGUeRai6adgwC+Q+JvE= -github.com/ipfs/go-ipfs-blockstore v1.2.0 h1:n3WTeJ4LdICWs/0VSfjHrlqpPpl6MZ+ySd3j8qz0ykw= -github.com/ipfs/go-ipfs-blockstore v1.2.0/go.mod h1:eh8eTFLiINYNSNawfZOC7HOxNTxpB1PFuA5E1m/7exE= github.com/ipfs/go-ipfs-blocksutil v0.0.1 h1:Eh/H4pc1hsvhzsQoMEP3Bke/aW5P5rVM1IWFJMcGIPQ= -github.com/ipfs/go-ipfs-blocksutil v0.0.1/go.mod h1:Yq4M86uIOmxmGPUHv/uI7uKqZNtLb449gwKqXjIsnRk= -github.com/ipfs/go-ipfs-chunker v0.0.1/go.mod h1:tWewYK0we3+rMbOh7pPFGDyypCtvGcBFymgY4rSDLAw= -github.com/ipfs/go-ipfs-chunker v0.0.5 h1:ojCf7HV/m+uS2vhUGWcogIIxiO5ubl5O57Q7NapWLY8= -github.com/ipfs/go-ipfs-config v0.19.0 h1:OuKIL+BkOZgJ+hb4Wg/9ynCtE/BaZBWcGy8hgdMepAo= -github.com/ipfs/go-ipfs-config v0.19.0/go.mod h1:wz2lKzOjgJeYJa6zx8W9VT7mz+iSd0laBMqS/9wmX6A= -github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-delay v0.0.1 h1:r/UXYyRcddO6thwOnhiznIAiSvxMECGgtv35Xs1IeRQ= github.com/ipfs/go-ipfs-delay v0.0.1/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= -github.com/ipfs/go-ipfs-ds-help v0.0.1/go.mod h1:gtP9xRaZXqIQRh1HRpp595KbBEdgqWFxefeVKOV8sxo= -github.com/ipfs/go-ipfs-ds-help v0.1.1/go.mod h1:SbBafGJuGsPI/QL3j9Fc5YPLeAu+SzOkI0gFwAg+mOs= -github.com/ipfs/go-ipfs-ds-help v1.1.0 h1:yLE2w9RAsl31LtfMt91tRZcrx+e61O5mDxFRR994w4Q= -github.com/ipfs/go-ipfs-ds-help v1.1.0/go.mod h1:YR5+6EaebOhfcqVCyqemItCLthrpVNot+rsOU/5IatU= -github.com/ipfs/go-ipfs-exchange-interface v0.0.1/go.mod h1:c8MwfHjtQjPoDyiy9cFquVtVHkO9b9Ob3FG91qJnWCM= -github.com/ipfs/go-ipfs-exchange-interface v0.1.0/go.mod h1:ych7WPlyHqFvCi/uQI48zLZuAWVP5iTQPXEfVaw5WEI= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0 h1:8lMSJmKogZYNo2jjhUs0izT+dck05pqUw4mWNW9Pw6Y= -github.com/ipfs/go-ipfs-exchange-interface v0.2.0/go.mod h1:z6+RhJuDQbqKguVyslSOuVDhqF9JtTrO3eptSAiW2/Y= -github.com/ipfs/go-ipfs-exchange-offline v0.0.1/go.mod h1:WhHSFCVYX36H/anEKQboAzpUws3x7UeEGkzQc3iNkM0= -github.com/ipfs/go-ipfs-exchange-offline v0.1.1/go.mod h1:vTiBRIbzSwDD0OWm+i3xeT0mO7jG2cbJYatp3HPk5XY= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0 h1:c/Dg8GDPzixGd0MC8Jh6mjOwU57uYokgWRFidfvEkuA= -github.com/ipfs/go-ipfs-exchange-offline v0.3.0/go.mod h1:MOdJ9DChbb5u37M1IcbrRB02e++Z7521fMxqCNRrz9s= -github.com/ipfs/go-ipfs-files v0.0.3/go.mod h1:INEFm0LL2LWXBhNJ2PMIIb2w45hpXgPjNoE7yA8Y1d4= -github.com/ipfs/go-ipfs-files v0.0.8/go.mod h1:wiN/jSG8FKyk7N0WyctKSvq3ljIa2NNTiZB55kpTdOs= -github.com/ipfs/go-ipfs-files v0.3.0 h1:fallckyc5PYjuMEitPNrjRfpwl7YFt69heCOUhsbGxQ= -github.com/ipfs/go-ipfs-files v0.3.0/go.mod h1:xAUtYMwB+iu/dtf6+muHNSFQCJG2dSiStR2P6sn9tIM= -github.com/ipfs/go-ipfs-posinfo v0.0.1 h1:Esoxj+1JgSjX0+ylc0hUmJCOv6V2vFoZiETLR6OtpRs= -github.com/ipfs/go-ipfs-posinfo v0.0.1/go.mod h1:SwyeVP+jCwiDu0C313l/8jg6ZxM0qqtlt2a0vILTc1A= -github.com/ipfs/go-ipfs-pq v0.0.1/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= -github.com/ipfs/go-ipfs-pq 
v0.0.2/go.mod h1:LWIqQpqfRG3fNc5XsnIhz/wQ2XXGyugQwls7BgUmUfY= github.com/ipfs/go-ipfs-pq v0.0.3 h1:YpoHVJB+jzK15mr/xsWC574tyDLkezVrDNeaalQBsTE= github.com/ipfs/go-ipfs-pq v0.0.3/go.mod h1:btNw5hsHBpRcSSgZtiNm/SLj5gYIZ18AKtv3kERkRb4= -github.com/ipfs/go-ipfs-provider v0.7.1 h1:eKToBUAb6ZY8iiA6AYVxzW4G1ep67XUaaEBUIYpxhfw= -github.com/ipfs/go-ipfs-routing v0.0.1/go.mod h1:k76lf20iKFxQTjcJokbPM9iBXVXVZhcOwc360N4nuKs= -github.com/ipfs/go-ipfs-routing v0.1.0/go.mod h1:hYoUkJLyAUKhF58tysKpids8RNDPO42BVMgK5dNsoqY= -github.com/ipfs/go-ipfs-routing v0.2.1/go.mod h1:xiNNiwgjmLqPS1cimvAw6EyB9rkVDbiocA4yY+wRNLM= -github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= -github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= -github.com/ipfs/go-ipfs-util v0.0.2/go.mod h1:CbPtkWJzjLdEcezDns2XYaehFVNXG9zrdrtMecczcsQ= -github.com/ipfs/go-ipld-cbor v0.0.2/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= -github.com/ipfs/go-ipld-cbor v0.0.3/go.mod h1:wTBtrQZA3SoFKMVkp6cn6HMRteIB1VsmHA0AQFOn7Nc= -github.com/ipfs/go-ipld-cbor v0.0.4/go.mod h1:BkCduEx3XBCO6t2Sfo5BaHzuok7hbhdMm9Oh8B2Ftq4= -github.com/ipfs/go-ipld-cbor v0.0.6 h1:pYuWHyvSpIsOOLw4Jy7NbBkCyzLDcl64Bf/LZW7eBQ0= -github.com/ipfs/go-ipld-cbor v0.0.6/go.mod h1:ssdxxaLJPXH7OjF5V4NSjBbcfh+evoR4ukuru0oPXMA= -github.com/ipfs/go-ipld-format v0.0.1/go.mod h1:kyJtbkDALmFHv3QR6et67i35QzO3S0dCDnkOJhcZkms= -github.com/ipfs/go-ipld-format v0.0.2/go.mod h1:4B6+FM2u9OJ9zCV+kSbgFAZlOrv1Hqbf0INGQgiKf9k= -github.com/ipfs/go-ipld-format v0.2.0/go.mod h1:3l3C1uKoadTPbeNfrDi+xMInYKlx2Cvg1BuydPSdzQs= -github.com/ipfs/go-ipld-format v0.3.0/go.mod h1:co/SdBE8h99968X0hViiw1MNlh6fvxxnHpvVLnH7jSM= +github.com/ipfs/go-ipfs-util v0.0.3 h1:2RFdGez6bu2ZlZdI+rWfIdbQb1KudQp3VGwPtdNCmE0= +github.com/ipfs/go-ipfs-util v0.0.3/go.mod h1:LHzG1a0Ig4G+iZ26UUOMjHd+lfM84LZCrn17xAKWBvs= github.com/ipfs/go-ipld-format v0.6.0 h1:VEJlA2kQ3LqFSIm5Vu6eIlSxD/Ze90xtc4Meten1F5U= github.com/ipfs/go-ipld-format v0.6.0/go.mod h1:g4QVMTn3marU3qXchwjpKPKgJv+zF+OlaKMyhJ4LHPg= github.com/ipfs/go-ipld-legacy v0.2.1 h1:mDFtrBpmU7b//LzLSypVrXsD8QxkEWxu5qVxN99/+tk= github.com/ipfs/go-ipld-legacy v0.2.1/go.mod h1:782MOUghNzMO2DER0FlBR94mllfdCJCkTtDtPM51otM= -github.com/ipfs/go-ipns v0.3.0 h1:ai791nTgVo+zTuq2bLvEGmWP1M0A6kGTXUsgv/Yq67A= -github.com/ipfs/go-ipns v0.3.0/go.mod h1:3cLT2rbvgPZGkHJoPO1YMJeh6LtkxopCkKFcio/wE24= -github.com/ipfs/go-libipfs v0.7.0 h1:Mi54WJTODaOL2/ZSm5loi3SwI3jI2OuFWUrQIkJ5cpM= -github.com/ipfs/go-libipfs v0.7.0/go.mod h1:KsIf/03CqhICzyRGyGo68tooiBE2iFbI/rXW7FhAYr0= -github.com/ipfs/go-log v0.0.1/go.mod h1:kL1d2/hzSpI0thNYjiKfjanbVNU+IIGA/WnNESY9leM= -github.com/ipfs/go-log v1.0.2/go.mod h1:1MNjMxe0u6xvJZgeqbJ8vdo2TKaGwZ1a0Bpza+sr2Sk= -github.com/ipfs/go-log v1.0.3/go.mod h1:OsLySYkwIbiSUR/yBTdv1qPtcE4FW3WPWk/ewz9Ru+A= -github.com/ipfs/go-log v1.0.4/go.mod h1:oDCg2FkjogeFOhqqb+N39l2RpTNPL6F/StPkB3kPgcs= github.com/ipfs/go-log v1.0.5 h1:2dOuUCB1Z7uoczMWgAyDck5JLb72zHzrMnGnCNNbvY8= github.com/ipfs/go-log v1.0.5/go.mod h1:j0b8ZoR+7+R99LD9jZ6+AJsrzkPbSXbZfGakb5JPtIo= -github.com/ipfs/go-log/v2 v2.0.2/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.3/go.mod h1:O7P1lJt27vWHhOwQmcFEvlmo49ry2VY2+JfBWFaa9+0= -github.com/ipfs/go-log/v2 v2.0.5/go.mod h1:eZs4Xt4ZUJQFM3DlanGhy7TkwwawCZcSByscwkWG+dw= -github.com/ipfs/go-log/v2 v2.1.1/go.mod h1:2v2nsGfZsvvAJz13SyFzf9ObaqwHiHxsPLEHntrv9KM= github.com/ipfs/go-log/v2 v2.1.3/go.mod h1:/8d0SH3Su5Ooc31QlL1WysJhvyOTDCjcCZ9Axpmri6g= 
-github.com/ipfs/go-log/v2 v2.3.0/go.mod h1:QqGoj30OTpnKaG/LKTGTxoP2mmQtjVMEnK72gynbe/g= github.com/ipfs/go-log/v2 v2.5.1 h1:1XdUzF7048prq4aBjDQQ4SL5RxftpRGdXhNRwKSAlcY= github.com/ipfs/go-log/v2 v2.5.1/go.mod h1:prSpmC1Gpllc9UYWxDiZDreBYw7zp4Iqp1kOLU9U5UI= -github.com/ipfs/go-merkledag v0.0.6/go.mod h1:QYPdnlvkOg7GnQRofu9XZimC5ZW5Wi3bKys/4GQQfto= -github.com/ipfs/go-merkledag v0.2.3/go.mod h1:SQiXrtSts3KGNmgOzMICy5c0POOpUNQLvB3ClKnBAlk= -github.com/ipfs/go-merkledag v0.3.2/go.mod h1:fvkZNNZixVW6cKSZ/JfLlON5OlgTXNdRLz0p6QG/I2M= -github.com/ipfs/go-merkledag v0.9.0 h1:DFC8qZ96Dz1hMT7dtIpcY524eFFDiEWAF8hNJHWW2pk= -github.com/ipfs/go-merkledag v0.9.0/go.mod h1:bPHqkHt5OZ0p1n3iqPeDiw2jIBkjAytRjS3WSBwjq90= github.com/ipfs/go-metrics-interface v0.0.1 h1:j+cpbjYvu4R8zbleSs36gvB7jR+wsL2fGD6n0jO4kdg= github.com/ipfs/go-metrics-interface v0.0.1/go.mod h1:6s6euYU4zowdslK0GKHmqaIZ3j/b/tL7HTWtJ4VPgWY= -github.com/ipfs/go-path v0.0.7/go.mod h1:6KTKmeRnBXgqrTvzFrPV3CamxcgvXX/4z79tfAd2Sno= -github.com/ipfs/go-peertaskqueue v0.0.4/go.mod h1:03H8fhyeMfKNFWqzYEVyMbcPUeYrqP1MX6Kd+aN+rMQ= -github.com/ipfs/go-peertaskqueue v0.1.0/go.mod h1:Jmk3IyCcfl1W3jTW3YpghSwSEC6IJ3Vzz/jUmWw8Z0U= -github.com/ipfs/go-peertaskqueue v0.7.0/go.mod h1:M/akTIE/z1jGNXMU7kFB4TeSEFvj68ow0Rrb04donIU= github.com/ipfs/go-peertaskqueue v0.8.1 h1:YhxAs1+wxb5jk7RvS0LHdyiILpNmRIRnZVztekOF0pg= github.com/ipfs/go-peertaskqueue v0.8.1/go.mod h1:Oxxd3eaK279FxeydSPPVGHzbwVeHjatZ2GA8XD+KbPU= -github.com/ipfs/go-unixfs v0.2.4/go.mod h1:SUdisfUjNoSDzzhGVxvCL9QO/nKdwXdr+gbMUdqcbYw= -github.com/ipfs/go-unixfs v0.4.5 h1:wj8JhxvV1G6CD7swACwSKYa+NgtdWC1RUit+gFnymDU= -github.com/ipfs/go-verifcid v0.0.1/go.mod h1:5Hrva5KBeIog4A+UpqlaIU+DEstipcJYQQZc0g37pY0= -github.com/ipfs/go-verifcid v0.0.2 h1:XPnUv0XmdH+ZIhLGKg6U2vaPaRDXb9urMyNVCE7uvTs= -github.com/ipfs/interface-go-ipfs-core v0.4.0/go.mod h1:UJBcU6iNennuI05amq3FQ7g0JHUkibHFAfhfUIy927o= -github.com/ipfs/interface-go-ipfs-core v0.10.0 h1:b/psL1oqJcySdQAsIBfW5ZJJkOAsYlhWtC0/Qvr4WiM= -github.com/ipfs/interface-go-ipfs-core v0.10.0/go.mod h1:F3EcmDy53GFkF0H3iEJpfJC320fZ/4G60eftnItrrJ0= +github.com/ipfs/kubo v0.24.0 h1:2BAnH9j6RojzmXwQNnI2Mhj6qzs5c5jzlAzv7N9sph4= github.com/ipld/go-codec-dagpb v1.6.0 h1:9nYazfyu9B1p3NAgfVdpRco3Fs2nFC72DqVsMj6rOcc= github.com/ipld/go-codec-dagpb v1.6.0/go.mod h1:ANzFhfP2uMJxRBr8CE+WQWs5UsNa0pYtmKZ+agnUw9s= -github.com/ipld/go-ipld-prime v0.11.0/go.mod h1:+WIAkokurHmZ/KwzDOMUuoeJgaRQktHtEaLglS3ZeV8= github.com/ipld/go-ipld-prime v0.21.0 h1:n4JmcpOlPDIxBcY037SVfpd1G+Sj1nKZah0m6QH9C2E= github.com/ipld/go-ipld-prime v0.21.0/go.mod h1:3RLqy//ERg/y5oShXXdx5YIp50cFGOanyMctpPjsvxQ= -github.com/jackpal/gateway v1.0.5/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQetPfnjA= -github.com/jackpal/go-nat-pmp v1.0.1/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= -github.com/jbenet/go-cienv v0.0.0-20150120210510-1bb1476777ec/go.mod h1:rGaEvXB4uRSZMmzKNLoXvTu1sfx+1kv/DojUlPrSZGs= github.com/jbenet/go-cienv v0.1.0/go.mod h1:TqNnHUmJgXau0nCzC7kXWeotg3J9W34CUv5Djy1+FlA= -github.com/jbenet/go-temp-err-catcher v0.0.0-20150120210811-aac704a3f4f2/go.mod h1:8GXXJV31xl8whumTzdZsTt3RnUIiPqzkyf7mxToRCMs= github.com/jbenet/go-temp-err-catcher v0.1.0 h1:zpb3ZH6wIE8Shj2sKS+khgRvf7T7RABoLk/+KKHggpk= github.com/jbenet/go-temp-err-catcher v0.1.0/go.mod h1:0kJRvmDZXNMIiJirNPEYfhpPwbGVtZVWC34vc5WLsDk= -github.com/jbenet/goprocess 
v0.0.0-20160826012719-b497e2f366b8/go.mod h1:Ly/wlsjFq/qrU3Rar62tu1gASgGw6chQbSh/XgIIXCY= -github.com/jbenet/goprocess v0.1.3/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jbenet/goprocess v0.1.4 h1:DRGOFReOMqqDNXwW70QkacFW0YN9QnwLV0Vqk+3oU0o= github.com/jbenet/goprocess v0.1.4/go.mod h1:5yspPrukOVuOLORacaBi858NqyClJPQxYZlqdZVfqY4= github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU= -github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= -github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= -github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= -github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/kami-zh/go-capturer v0.0.0-20171211120116-e492ea43421d/go.mod h1:P2viExyCEfeWGU259JnaQ34Inuec4R38JCyBx2edgD0= -github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= -github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= -github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences 
v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/koron/go-ssdp v0.0.0-20180514024734-4a0ed625a78b/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= -github.com/koron/go-ssdp v0.0.0-20191105050749-2e1c40ed0b5d/go.mod h1:5Ky9EC2xfoUKUor0Hjgi2BJhCSXJfMOFlmyYrVKGQMk= +github.com/klauspost/compress v1.17.2 h1:RlWWUY/Dr4fL8qk9YG7DTZ7PDgME2V4csBXA8L/ixi4= +github.com/klauspost/compress v1.17.2/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/cpuid/v2 v2.2.6 h1:ndNyv040zDGIDh8thGkXYjnFtiN02M1PVVF+JE/48xc= +github.com/klauspost/cpuid/v2 v2.2.6/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0= github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25 h1:hC67vWtvuDnw8w6u4jLFoj3SOH92/4Lq8SCR++L7njw= github.com/lens-vm/lens/host-go v0.0.0-20230729032926-5acb4df9bd25/go.mod h1:rDE4oJUIAQoXX9heUg8VOQf5LscRWj0BeE5mbGqOs3E= -github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= -github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= -github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= github.com/libp2p/go-buffer-pool v0.1.0 h1:oK4mSFcQz7cTQIfqbe4MIj9gLW+mnanjyFtc6cdF0Y8= github.com/libp2p/go-buffer-pool v0.1.0/go.mod h1:N+vh8gMqimBzdKkSMVuydVDq+UV5QTWy5HSiZacSbPg= github.com/libp2p/go-cidranger v1.1.0 h1:ewPN8EZ0dd1LSnrtuwd4709PXVcITVeuwbag38yPW7c= github.com/libp2p/go-cidranger v1.1.0/go.mod h1:KWZTfSr+r9qEo9OkI9/SIEeAtw+NNoU0dXIXt15Okic= -github.com/libp2p/go-conn-security v0.0.1/go.mod h1:bGmu51N0KU9IEjX7kl2PQjgZa40JQWnayTvNMgD/vyk= -github.com/libp2p/go-conn-security-multistream v0.0.2/go.mod h1:nc9vud7inQ+d6SO0I/6dSWrdMnHnzZNHeyUQqrAJulE= -github.com/libp2p/go-conn-security-multistream v0.1.0/go.mod h1:aw6eD7LOsHEX7+2hJkDxw1MteijaVcI+/eP2/x3J1xc= -github.com/libp2p/go-conn-security-multistream v0.2.0/go.mod h1:hZN4MjlNetKD3Rq5Jb/P5ohUnFLNzEAR4DLSzpn2QLU= -github.com/libp2p/go-conn-security-multistream v0.2.1/go.mod h1:cR1d8gA0Hr59Fj6NhaTpFhJZrjSYuNmhpT2r25zYR70= -github.com/libp2p/go-eventbus v0.1.0/go.mod h1:vROgu5cs5T7cv7POWlWxBaVLxfSegC5UGQf8A2eEmx4= -github.com/libp2p/go-eventbus v0.2.1/go.mod h1:jc2S4SoEVPP48H9Wpzm5aiGwUCBMfGhVhhBjyhhCJs8= -github.com/libp2p/go-flow-metrics v0.0.1/go.mod h1:Iv1GH0sG8DtYN3SVJ2eG221wMiNpZxBdp967ls1g+k8= 
-github.com/libp2p/go-flow-metrics v0.0.3/go.mod h1:HeoSNUrOJVK1jEpDqVEiUOIXqhbnS27omG0uWU5slZs= github.com/libp2p/go-flow-metrics v0.1.0 h1:0iPhMI8PskQwzh57jB9WxIuIOQ0r+15PChFGkx3Q3WM= github.com/libp2p/go-flow-metrics v0.1.0/go.mod h1:4Xi8MX8wj5aWNDAZttg6UPmc0ZrnFNsMtpsYUClFtro= -github.com/libp2p/go-libp2p v0.0.30/go.mod h1:XWT8FGHlhptAv1+3V/+J5mEpzyui/5bvFsNuWYs611A= -github.com/libp2p/go-libp2p v0.1.0/go.mod h1:6D/2OBauqLUoqcADOJpn9WbKqvaM07tDw68qHM0BxUM= -github.com/libp2p/go-libp2p v0.1.1/go.mod h1:I00BRo1UuUSdpuc8Q2mN7yDF/oTUTRAX6JWpTiK9Rp8= -github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZkfEI5sT54= -github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= -github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= -github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.14.3/go.mod h1:d12V4PdKbpL0T1/gsUNN8DfgMuRPDX8bS2QxCZlwRH0= -github.com/libp2p/go-libp2p v0.29.2 h1:uPw/c8hOxoLP/KhFnzlc5Ejqf+OmAL1dwIsqE31WBtY= -github.com/libp2p/go-libp2p v0.29.2/go.mod h1:OU7nSq0aEZMsV2wY8nXn1+XNNt9q2UiR8LjW3Kmp2UE= +github.com/libp2p/go-libp2p v0.32.1 h1:wy1J4kZIZxOaej6NveTWCZmHiJ/kY7GoAqXgqNCnPps= +github.com/libp2p/go-libp2p v0.32.1/go.mod h1:hXXC3kXPlBZ1eu8Q2hptGrMB4mZ3048JUoS4EKaHW5c= github.com/libp2p/go-libp2p-asn-util v0.3.0 h1:gMDcMyYiZKkocGXDQ5nsUQyquC9+H+iLEQHwOCZ7s8s= github.com/libp2p/go-libp2p-asn-util v0.3.0/go.mod h1:B1mcOrKUE35Xq/ASTmQ4tN3LNzVVaMNmq2NACuqyB9w= -github.com/libp2p/go-libp2p-autonat v0.0.6/go.mod h1:uZneLdOkZHro35xIhpbtTzLlgYturpu4J5+0cZK3MqE= -github.com/libp2p/go-libp2p-autonat v0.1.0/go.mod h1:1tLf2yXxiE/oKGtDwPYWTSYG3PtvYlJmg7NeVtPRqH8= -github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= -github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= -github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= -github.com/libp2p/go-libp2p-autonat v0.2.2/go.mod h1:HsM62HkqZmHR2k1xgX34WuWDzk/nBwNHoeyyT4IWV6A= -github.com/libp2p/go-libp2p-autonat v0.4.2/go.mod h1:YxaJlpr81FhdOv3W3BTconZPfhaYivRdf53g+S2wobk= -github.com/libp2p/go-libp2p-blankhost v0.0.1/go.mod h1:Ibpbw/7cPPYwFb7PACIWdvxxv0t0XCCI10t7czjAjTc= -github.com/libp2p/go-libp2p-blankhost v0.1.1/go.mod h1:pf2fvdLJPsC1FsVrNP3DUUvMzUts2dsLLBEpo1vW1ro= -github.com/libp2p/go-libp2p-blankhost v0.1.4/go.mod h1:oJF0saYsAXQCSfDq254GMNmLNz6ZTHTOvtF4ZydUvwU= -github.com/libp2p/go-libp2p-blankhost v0.2.0/go.mod h1:eduNKXGTioTuQAUcZ5epXi9vMl+t4d8ugUBRQ4SqaNQ= -github.com/libp2p/go-libp2p-circuit v0.0.9/go.mod h1:uU+IBvEQzCu953/ps7bYzC/D/R0Ho2A9LfKVVCatlqU= -github.com/libp2p/go-libp2p-circuit v0.1.0/go.mod h1:Ahq4cY3V9VJcHcn1SBXjr78AbFkZeIRmfunbA7pmFh8= -github.com/libp2p/go-libp2p-circuit v0.1.4/go.mod h1:CY67BrEjKNDhdTk8UgBX1Y/H5c3xkAcs3gnksxY7osU= -github.com/libp2p/go-libp2p-circuit v0.2.1/go.mod h1:BXPwYDN5A8z4OEY9sOfr2DUQMLQvKt/6oku45YUmjIo= -github.com/libp2p/go-libp2p-circuit v0.4.0/go.mod h1:t/ktoFIUzM6uLQ+o1G6NuBl2ANhBKN9Bc8jRIk31MoA= -github.com/libp2p/go-libp2p-connmgr v0.4.0 h1:q/KZUS1iMDIQckMZarMYwhQisJqiFPHAVC1c4DR3hDE= -github.com/libp2p/go-libp2p-connmgr v0.4.0/go.mod h1:exFQQm19PFAx+QuJmBPw4MM58QejzPJRFFFYnNmgi2w= -github.com/libp2p/go-libp2p-core v0.0.1/go.mod h1:g/VxnTZ/1ygHxH3dKok7Vno1VfpvGcGip57wjTU4fco= -github.com/libp2p/go-libp2p-core v0.0.2/go.mod h1:9dAcntw/n46XycV4RnlBq3BpgrmyUi9LuoTNdPrbUco= 
-github.com/libp2p/go-libp2p-core v0.0.3/go.mod h1:j+YQMNz9WNSkNezXOsahp9kwZBKBvxLpKD316QWSJXE= -github.com/libp2p/go-libp2p-core v0.0.4/go.mod h1:jyuCQP356gzfCFtRKyvAbNkyeuxb7OlyhWZ3nls5d2I= -github.com/libp2p/go-libp2p-core v0.2.0/go.mod h1:X0eyB0Gy93v0DZtSYbEM7RnMChm9Uv3j7yRXjO77xSI= -github.com/libp2p/go-libp2p-core v0.2.2/go.mod h1:8fcwTbsG2B+lTgRJ1ICZtiM5GWCWZVoVrLaDRvIRng0= -github.com/libp2p/go-libp2p-core v0.2.4/go.mod h1:STh4fdfa5vDYr0/SzYYeqnt+E6KfEV5VxfIrm0bcI0g= -github.com/libp2p/go-libp2p-core v0.3.0/go.mod h1:ACp3DmS3/N64c2jDzcV429ukDpicbL6+TrrxANBjPGw= -github.com/libp2p/go-libp2p-core v0.3.1/go.mod h1:thvWy0hvaSBhnVBaW37BvzgVV68OUhgJJLAa6almrII= -github.com/libp2p/go-libp2p-core v0.4.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.0/go.mod h1:49XGI+kc38oGVwqSBhDEwytaAxgZasHhFfQKibzTls0= -github.com/libp2p/go-libp2p-core v0.5.1/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.4/go.mod h1:uN7L2D4EvPCvzSH5SrhR72UWbnSGpt5/a35Sm4upn4Y= -github.com/libp2p/go-libp2p-core v0.5.5/go.mod h1:vj3awlOr9+GMZJFH9s4mpt9RHHgGqeHCopzbYKZdRjM= -github.com/libp2p/go-libp2p-core v0.5.6/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.5.7/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.6.0/go.mod h1:txwbVEhHEXikXn9gfC7/UDDw7rkxuX0bJvM49Ykaswo= -github.com/libp2p/go-libp2p-core v0.6.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.20.0 h1:PGKM74+T+O/FaZNARNW32i90RMBHCcgd/hkum2UQ5eY= -github.com/libp2p/go-libp2p-core v0.20.0/go.mod h1:6zR8H7CvQWgYLsbG4on6oLNSGcyKaYFSEYyDt51+bIY= -github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= -github.com/libp2p/go-libp2p-crypto v0.0.2/go.mod h1:eETI5OUfBnvARGOHrJz2eWNyTUxEGZnBxMcbUjfIj4I= -github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= -github.com/libp2p/go-libp2p-discovery v0.0.5/go.mod h1:YtF20GUxjgoKZ4zmXj8j3Nb2TUSBHFlOCetzYdbZL5I= -github.com/libp2p/go-libp2p-discovery v0.1.0/go.mod h1:4F/x+aldVHjHDHuX85x1zWoFTGElt8HnoDzwkFZm29g= -github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= -github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= github.com/libp2p/go-libp2p-gostream v0.6.0 h1:QfAiWeQRce6pqnYfmIVWJFXNdDyfiR/qkCnjyaZUPYU= github.com/libp2p/go-libp2p-gostream v0.6.0/go.mod h1:Nywu0gYZwfj7Jc91PQvbGU8dIpqbQQkjWgDuOrFaRdA= -github.com/libp2p/go-libp2p-host v0.0.1/go.mod h1:qWd+H1yuU0m5CwzAkvbSjqKairayEHdR5MMl7Cwa7Go= -github.com/libp2p/go-libp2p-host v0.0.3/go.mod h1:Y/qPyA6C8j2coYyos1dfRm0I8+nvd4TGrDGt4tA7JR8= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.1/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.4/go.mod 
h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= -github.com/libp2p/go-libp2p-interface-connmgr v0.0.5/go.mod h1:GarlRLH0LdeWcLnYM/SaBykKFl9U5JFnbBGruAk/D5k= -github.com/libp2p/go-libp2p-interface-pnet v0.0.1/go.mod h1:el9jHpQAXK5dnTpKA4yfCNBZXvrzdOU75zz+C6ryp3k= -github.com/libp2p/go-libp2p-kad-dht v0.23.0 h1:sxE6LxLopp79eLeV695n7+c77V/Vn4AMF28AdM/XFqM= -github.com/libp2p/go-libp2p-kad-dht v0.23.0/go.mod h1:oO5N308VT2msnQI6qi5M61wzPmJYg7Tr9e16m5n7uDU= -github.com/libp2p/go-libp2p-kbucket v0.6.0 h1:1uyqIdE6X7ihtbNg+vRc9EQEmZPEBaehvJ2W14rUrRQ= -github.com/libp2p/go-libp2p-kbucket v0.6.0/go.mod h1:efnPrfoP+WT/ONcC5eB0iADCDIJFXauXhylgJYO+VWw= -github.com/libp2p/go-libp2p-loggables v0.0.1/go.mod h1:lDipDlBNYbpyqyPX/KcoO+eq0sJYEVR2JgOexcivchg= -github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= -github.com/libp2p/go-libp2p-metrics v0.0.1/go.mod h1:jQJ95SXXA/K1VZi13h52WZMa9ja78zjyy5rspMsC/08= -github.com/libp2p/go-libp2p-mplex v0.1.1/go.mod h1:KUQWpGkCzfV7UIpi8SKsAVxyBgz1c9R5EvxgnwLsb/I= -github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3g+OtR+EMMODbKo= -github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= -github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= -github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= -github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= -github.com/libp2p/go-libp2p-nat v0.0.4/go.mod h1:N9Js/zVtAXqaeT99cXgTV9e75KpnWCvVOiGzlcHmBbY= -github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= -github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= -github.com/libp2p/go-libp2p-net v0.0.1/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= -github.com/libp2p/go-libp2p-net v0.0.2/go.mod h1:Yt3zgmlsHOgUWSXmt5V/Jpz9upuJBE8EgNU9DrCcR8c= -github.com/libp2p/go-libp2p-netutil v0.0.1/go.mod h1:GdusFvujWZI9Vt0X5BKqwWWmZFxecf9Gt03cKxm2f/Q= -github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= -github.com/libp2p/go-libp2p-peer v0.1.1/go.mod h1:jkF12jGB4Gk/IOo+yomm+7oLWxF278F7UnrYUQ1Q8es= -github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= -github.com/libp2p/go-libp2p-peerstore v0.0.1/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= -github.com/libp2p/go-libp2p-peerstore v0.0.6/go.mod h1:RabLyPVJLuNQ+GFyoEkfi8H4Ti6k/HtZJ7YKgtSq+20= -github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= -github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= -github.com/libp2p/go-libp2p-peerstore v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnqhVnMNQZo9nkSCuAbnQ= -github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= -github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod 
h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.6.0 h1:HJminhQSGISBIRb93N6WK3t6Fa8OOTnHd/VBjL4mY5A= -github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= -github.com/libp2p/go-libp2p-protocol v0.0.1/go.mod h1:Af9n4PiruirSDjHycM1QuiMi/1VZNHYcK8cLgFJLZ4s= -github.com/libp2p/go-libp2p-protocol v0.1.0/go.mod h1:KQPHpAabB57XQxGrXCNvbL6UEXfQqUgC/1adR2Xtflk= -github.com/libp2p/go-libp2p-pubsub v0.9.3 h1:ihcz9oIBMaCK9kcx+yHWm3mLAFBMAUsM4ux42aikDxo= -github.com/libp2p/go-libp2p-pubsub v0.9.3/go.mod h1:RYA7aM9jIic5VV47WXu4GkcRxRhrdElWf8xtyli+Dzc= -github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= -github.com/libp2p/go-libp2p-record v0.0.1/go.mod h1:grzqg263Rug/sRex85QrDOLntdFAymLDLm7lxMgU79Q= -github.com/libp2p/go-libp2p-record v0.1.0/go.mod h1:ujNc8iuE5dlKWVy6wuL6dd58t0n7xI4hAIl8pE6wu5Q= +github.com/libp2p/go-libp2p-kad-dht v0.25.1 h1:ofFNrf6MMEy4vi3R1VbJ7LOcTn3Csh0cDcaWHTxtWNA= +github.com/libp2p/go-libp2p-kad-dht v0.25.1/go.mod h1:6za56ncRHYXX4Nc2vn8z7CZK0P4QiMcrn77acKLM2Oo= +github.com/libp2p/go-libp2p-kbucket v0.6.3 h1:p507271wWzpy2f1XxPzCQG9NiN6R6lHL9GiSErbQQo0= +github.com/libp2p/go-libp2p-kbucket v0.6.3/go.mod h1:RCseT7AH6eJWxxk2ol03xtP9pEHetYSPXOaJnOiD8i0= +github.com/libp2p/go-libp2p-pubsub v0.10.0 h1:wS0S5FlISavMaAbxyQn3dxMOe2eegMfswM471RuHJwA= +github.com/libp2p/go-libp2p-pubsub v0.10.0/go.mod h1:1OxbaT/pFRO5h+Dpze8hdHQ63R0ke55XTs6b6NwLLkw= github.com/libp2p/go-libp2p-record v0.2.0 h1:oiNUOCWno2BFuxt3my4i1frNrt7PerzB3queqa1NkQ0= github.com/libp2p/go-libp2p-record v0.2.0/go.mod h1:I+3zMkvvg5m2OcSdoL0KPljyJyvNDFGKX7QdlpYUcwk= -github.com/libp2p/go-libp2p-routing v0.0.1/go.mod h1:N51q3yTr4Zdr7V8Jt2JIktVU+3xBBylx1MZeVA6t1Ys= -github.com/libp2p/go-libp2p-routing-helpers v0.7.0 h1:sirOYVD0wGWjkDwHZvinunIpaqPLBXkcnXApVHwZFGA= -github.com/libp2p/go-libp2p-routing-helpers v0.7.0/go.mod h1:R289GUxUMzRXIbWGSuUUTPrlVJZ3Y/pPz495+qgXJX8= -github.com/libp2p/go-libp2p-secio v0.0.3/go.mod h1:hS7HQ00MgLhRO/Wyu1bTX6ctJKhVpm+j2/S2A5UqYb0= -github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= -github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= -github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= -github.com/libp2p/go-libp2p-secio v0.2.2/go.mod h1:wP3bS+m5AUnFA+OFO7Er03uO1mncHG0uVwGrwvjYlNY= -github.com/libp2p/go-libp2p-swarm v0.0.6/go.mod h1:s5GZvzg9xXe8sbeESuFpjt8CJPTCa8mhEusweJqyFy8= -github.com/libp2p/go-libp2p-swarm v0.1.0/go.mod h1:wQVsCdjsuZoc730CgOvh5ox6K8evllckjebkdiY5ta4= -github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaTNyBcHImCxRpPKU= -github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= -github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= -github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= -github.com/libp2p/go-libp2p-testing v0.0.1/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.0.4/go.mod 
h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= -github.com/libp2p/go-libp2p-testing v0.1.0/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.1/go.mod h1:xaZWMJrPUM5GlDBxCeGUi7kI4eqnjVyavGroI2nxEM0= -github.com/libp2p/go-libp2p-testing v0.1.2-0.20200422005655-8775583591d8/go.mod h1:Qy8sAncLKpwXtS2dSnDOP8ktexIAHKu+J+pnZOFZLTc= -github.com/libp2p/go-libp2p-testing v0.3.0/go.mod h1:efZkql4UZ7OVsEfaxNHZPzIehtsBXMrXnCfJIgDti5g= -github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= +github.com/libp2p/go-libp2p-routing-helpers v0.7.3 h1:u1LGzAMVRK9Nqq5aYDVOiq/HaB93U9WWczBzGyAC5ZY= +github.com/libp2p/go-libp2p-routing-helpers v0.7.3/go.mod h1:cN4mJAD/7zfPKXBcs9ze31JGYAZgzdABEm+q/hkswb8= github.com/libp2p/go-libp2p-testing v0.12.0 h1:EPvBb4kKMWO29qP4mZGyhVzUyR25dvfUIK5WDu6iPUA= -github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-transport v0.0.1/go.mod h1:UzbUs9X+PHOSw7S3ZmeOxfnwaQY5vGDzZmKPod3N3tk= -github.com/libp2p/go-libp2p-transport v0.0.5/go.mod h1:StoY3sx6IqsP6XKoabsPnHCwqKXWUMWU7Rfcsubee/A= -github.com/libp2p/go-libp2p-transport-upgrader v0.0.4/go.mod h1:RGq+tupk+oj7PzL2kn/m1w6YXxcIAYJYeI90h6BGgUc= -github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= -github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= -github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= -github.com/libp2p/go-libp2p-yamux v0.1.2/go.mod h1:xUoV/RmYkg6BW/qGxA9XJyg+HzXFYkeXbnhjmnYzKp8= -github.com/libp2p/go-libp2p-yamux v0.1.3/go.mod h1:VGSQVrqkh6y4nm0189qqxMtvyBft44MOYYPpYKXiVt4= -github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= -github.com/libp2p/go-libp2p-yamux v0.2.1/go.mod h1:1FBXiHDk1VyRM1C0aez2bCfHQ4vMZKkAQzZbkSQt5fI= -github.com/libp2p/go-libp2p-yamux v0.2.2/go.mod h1:lIohaR0pT6mOt0AZ0L2dFze9hds9Req3OfS+B+dv4qw= -github.com/libp2p/go-libp2p-yamux v0.2.5/go.mod h1:Zpgj6arbyQrmZ3wxSZxfBmbdnWtbZ48OpsfmQVTErwA= -github.com/libp2p/go-libp2p-yamux v0.2.7/go.mod h1:X28ENrBMU/nm4I3Nx4sZ4dgjZ6VhLEn0XhIoZ5viCwU= -github.com/libp2p/go-libp2p-yamux v0.2.8/go.mod h1:/t6tDqeuZf0INZMTgd0WxIRbtK2EzI2h7HbFm9eAKI4= -github.com/libp2p/go-libp2p-yamux v0.4.0/go.mod h1:+DWDjtFMzoAwYLVkNZftoucn7PelNoy5nm3tZ3/Zw30= -github.com/libp2p/go-libp2p-yamux v0.5.0/go.mod h1:AyR8k5EzyM2QN9Bbdg6X1SkVVuqLwTGf0L4DFq9g6po= -github.com/libp2p/go-libp2p-yamux v0.5.4/go.mod h1:tfrXbyaTqqSU654GTvK3ocnSZL3BuHoeTSqhcel1wsE= -github.com/libp2p/go-maddr-filter v0.0.1/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.4/go.mod h1:6eT12kSQMA9x2pvFQa+xesMKUBlj9VImZbj3B9FBH/Q= -github.com/libp2p/go-maddr-filter v0.0.5/go.mod h1:Jk+36PMfIqCJhAnaASRH83bdAvfDRp/w6ENFaC9bG+M= -github.com/libp2p/go-maddr-filter v0.1.0/go.mod h1:VzZhTXkMucEGGEOSKddrwGiOv0tUhgnKqNEmIAz/bPU= -github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= -github.com/libp2p/go-mplex v0.0.4/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTWe7l4Yd0= -github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= -github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= 
-github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= -github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.3/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.4/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= -github.com/libp2p/go-msgio v0.0.6/go.mod h1:4ecVB6d9f4BDSL5fqvPiC4A3KivjWn+Venn/1ALLMWA= github.com/libp2p/go-msgio v0.3.0 h1:mf3Z8B1xcFN314sWX+2vOTShIE0Mmn2TXn3YCUQGNj0= github.com/libp2p/go-msgio v0.3.0/go.mod h1:nyRM819GmVaF9LX3l03RMh10QdOroF++NBbxAb0mmDM= -github.com/libp2p/go-nat v0.0.3/go.mod h1:88nUEt0k0JD45Bk93NIwDqjlhiOwOoV36GchpcVc1yI= -github.com/libp2p/go-nat v0.0.4/go.mod h1:Nmw50VAvKuk38jUBcmNh6p9lUJLoODbJRvYAa/+KSDo= -github.com/libp2p/go-nat v0.0.5/go.mod h1:B7NxsVNPZmRLvMOwiEO1scOSyjA56zxYAGv1yQgRkEU= github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk= github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk= -github.com/libp2p/go-netroute v0.1.2/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.3/go.mod h1:jZLDV+1PE8y5XxBySEBgbuVAXbhtuHSdmLPL2n9MKbk= -github.com/libp2p/go-netroute v0.1.5/go.mod h1:V1SR3AaECRkEQCoFFzYwVYWvYIEtlxx89+O3qcpCl4A= -github.com/libp2p/go-netroute v0.1.6/go.mod h1:AqhkMh0VuWmfgtxKPp3Oc1LdU5QSWS7wl0QLhSZqXxQ= github.com/libp2p/go-netroute v0.2.1 h1:V8kVrpD8GK0Riv15/7VN6RbUQ3URNZVosw7H2v9tksU= github.com/libp2p/go-netroute v0.2.1/go.mod h1:hraioZr0fhBjG0ZRXJJ6Zj2IVEVNx6tDTFQfSmcq7mQ= -github.com/libp2p/go-openssl v0.0.2/go.mod h1:v8Zw2ijCSWBQi8Pq5GAixw6DbFfa9u6VIYDXnvOXkc0= -github.com/libp2p/go-openssl v0.0.3/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.4/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.5/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-openssl v0.0.7/go.mod h1:unDrJpgy3oFr+rqXsarWifmJuNnJR4chtO1HmaZjggc= -github.com/libp2p/go-reuseport v0.0.1/go.mod h1:jn6RmB1ufnQwl0Q1f+YxAj8isJgDCQzaaxIFYDhcYEA= -github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= -github.com/libp2p/go-reuseport v0.3.0 h1:iiZslO5byUYZEg9iCwJGf5h+sf1Agmqx2V2FDjPyvUw= -github.com/libp2p/go-reuseport v0.3.0/go.mod h1:laea40AimhtfEqysZ71UpYj4S+R9VpH8PgqLo7L+SwI= -github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= -github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= -github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= -github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.0/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-sockaddr v0.1.1/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= -github.com/libp2p/go-stream-muxer v0.0.1/go.mod h1:bAo8x7YkSpadMTbtTaxGVHWUQsR/l5MEaHbKaliuT14= -github.com/libp2p/go-stream-muxer v0.1.0/go.mod h1:8JAVsjeRBCWwPoZeH0W1imLOcriqXJyFvB0mR4A04sQ= -github.com/libp2p/go-stream-muxer-multistream v0.1.1/go.mod h1:zmGdfkQ1AzOECIAcccoL8L//laqawOsO03zX8Sa+eGw= -github.com/libp2p/go-stream-muxer-multistream 
v0.2.0/go.mod h1:j9eyPol/LLRqT+GPLSxvimPhNph4sfYfMoDPd7HkzIc= -github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt64gFJUXEryejzNb0lisTt+fAMJA= -github.com/libp2p/go-tcp-transport v0.0.4/go.mod h1:+E8HvC8ezEVOxIo3V5vCK9l1y/19K427vCzQ+xHKH/o= -github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= -github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= -github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.3/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= -github.com/libp2p/go-testutil v0.0.1/go.mod h1:iAcJc/DKJQanJ5ws2V+u5ywdL2n12X1WbbEG+Jjy69I= -github.com/libp2p/go-testutil v0.1.0/go.mod h1:81b2n5HypcVyrCg/MJx4Wgfp/VHojytjVe/gLzZ2Ehc= -github.com/libp2p/go-ws-transport v0.0.5/go.mod h1:Qbl4BxPfXXhhd/o0wcrgoaItHqA9tnZjoFZnxykuaXU= -github.com/libp2p/go-ws-transport v0.1.0/go.mod h1:rjw1MG1LU9YDC6gzmwObkPd/Sqwhw7yT74kj3raBFuo= -github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= -github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= -github.com/libp2p/go-yamux v1.2.1/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.2.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.3/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.5/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= -github.com/libp2p/go-yamux v1.3.7/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.0/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= -github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= +github.com/libp2p/go-reuseport v0.4.0 h1:nR5KU7hD0WxXCJbmw7r2rhRYruNRl2koHw8fQscQm2s= +github.com/libp2p/go-reuseport v0.4.0/go.mod h1:ZtI03j/wO5hZVDFo2jKywN6bYKWLOy8Se6DrI2E1cLU= github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCypkQ= github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/libp2p/zeroconf/v2 v2.2.0 h1:Cup06Jv6u81HLhIj1KasuNM/RHHrJ8T7wOTS4+Tv53Q= -github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= -github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= -github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= -github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= -github.com/mailru/easyjson 
v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= -github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= -github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= -github.com/mattn/go-isatty v0.0.13/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= -github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA= -github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= -github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4= -github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.12/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.28/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= -github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= -github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= +github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE= +github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c h1:bzE/A84HN25pxAuk9Eej1Kz9OUelF97nAc82bDquQI8= github.com/mikioh/tcp v0.0.0-20190314235350-803a9b46060c/go.mod 
h1:0SQS9kMwD2VsyFEB++InYyBJroV/FRmBgcydeSUcJms= github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b h1:z78hV3sbSMAUoyUMM0I83AUIT6Hu17AWfgjzIbtrYFc= @@ -930,229 +421,113 @@ github.com/mikioh/tcpinfo v0.0.0-20190314235526-30a79bb1804b/go.mod h1:lxPUiZwKo github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc h1:PTfri+PuQmWDqERdnNMiD9ZejrlswWrCpBEZgWOiTrc= github.com/mikioh/tcpopt v0.0.0-20190314235656-172688c1accc/go.mod h1:cGKTAVKx4SxOuR/czcZ/E2RSJ3sfHs8FpHhQ5CWMf9s= github.com/minio/blake2b-simd v0.0.0-20160723061019-3f5f724cb5b1/go.mod h1:pD8RvIylQ358TN4wwqatJ8rNavkEINozVn9DtGI3dfQ= -github.com/minio/sha256-simd v0.0.0-20190131020904-2d45a736cd16/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.0.0-20190328051042-05b4dd3047e5/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= -github.com/minio/sha256-simd v0.1.0/go.mod h1:2FMWW+8GMoPweT6+pI63m9YE3Lmw4J71hV56Chs1E/U= github.com/minio/sha256-simd v0.1.1-0.20190913151208-6de447530771/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/sha256-simd v1.0.0/go.mod h1:OuYzVNI5vcoYIAmbIvHPl3N3jUzVedXbKy5RFepssQM= github.com/minio/sha256-simd v1.0.1 h1:6kaan5IFmwTNynnKKpDHe6FWHohJOHhCPchzK49dzMM= github.com/minio/sha256-simd v1.0.1/go.mod h1:Pz6AKMiUdngCLpeTL/RJY1M9rUuPMYujV5xJjtbRSN8= -github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI= -github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg= -github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= -github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/mr-tron/base58 v1.1.0/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= -github.com/mr-tron/base58 v1.1.1/go.mod h1:xcD2VGqlgYjBdcBLw+TuYLr8afG+Hj8g2eTVqeSzSU8= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 h1:RWengNIwukTxcDr9M+97sNutRR1RKhG96O6jWumTTnw= +github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 
h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= -github.com/multiformats/go-base32 v0.0.3/go.mod h1:pLiuGC8y0QR3Ue4Zug5UzK9LjgbkL8NSQj0zQ5Nz/AA= github.com/multiformats/go-base32 v0.1.0 h1:pVx9xoSPqEIQG8o+UbAe7DNi51oej1NtK+aGkbLYxPE= github.com/multiformats/go-base32 v0.1.0/go.mod h1:Kj3tFY6zNr+ABYMqeUNeGvkIC/UYgtWibDcT0rExnbI= -github.com/multiformats/go-base36 v0.1.0/go.mod h1:kFGE83c6s80PklsHO9sRn2NCoffoRdUUOENyW/Vv6sM= github.com/multiformats/go-base36 v0.2.0 h1:lFsAbNOGeKtuKozrtBsAkSVhv1p9D0/qedU9rQyccr0= github.com/multiformats/go-base36 v0.2.0/go.mod h1:qvnKE++v+2MWCfePClUEjE78Z7P2a1UV0xHgWc0hkp4= -github.com/multiformats/go-multiaddr v0.0.1/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.2/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.0.4/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= -github.com/multiformats/go-multiaddr v0.1.0/go.mod h1:xKVEak1K9cS1VdmPZW3LSIb6lgmoS58qz/pzqmAxV44= github.com/multiformats/go-multiaddr v0.1.1/go.mod h1:aMKBKNEYmzmDmxfX88/vz+J5IU55txyt0p4aiWVohjo= github.com/multiformats/go-multiaddr v0.2.0/go.mod h1:0nO36NvPpyV4QzvTLi/lafl2y95ncPj0vFwVF6k6wJ4= -github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z7tZ04iMk5wP4QMGGE= -github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= -github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= -github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= -github.com/multiformats/go-multiaddr v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= -github.com/multiformats/go-multiaddr v0.10.1 h1:HghtFrWyZEPrpTvgAMFJi6gFdgHfs2cb0pyfDsk+lqU= -github.com/multiformats/go-multiaddr v0.10.1/go.mod h1:jLEZsA61rwWNZQTHHnqq2HNa+4os/Hz54eqiRnsRqYQ= -github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= -github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= +github.com/multiformats/go-multiaddr v0.12.0 h1:1QlibTFkoXJuDjjYsMHhE73TnzJQl8FSWatk/0gxGzE= +github.com/multiformats/go-multiaddr v0.12.0/go.mod h1:WmZXgObOQOYp9r3cslLlppkrz1FYSHmE834dfz/lWu8= github.com/multiformats/go-multiaddr-dns v0.3.1 h1:QgQgR+LQVt3NPTjbrLLpsaT2ufAA2y0Mkk+QRVJbW3A= github.com/multiformats/go-multiaddr-dns v0.3.1/go.mod h1:G/245BRQ6FJGmryJCrOuTdB37AMA5AMOVuO6NY3JwTk= -github.com/multiformats/go-multiaddr-fmt v0.0.1/go.mod h1:aBYjqL4T/7j4Qx+R73XSv/8JsgnRFlf0w2KGLCmXl3Q= github.com/multiformats/go-multiaddr-fmt v0.1.0 h1:WLEFClPycPkp4fnIzoFoV9FVd49/eQsuaL3/CWe167E= github.com/multiformats/go-multiaddr-fmt v0.1.0/go.mod h1:hGtDIW4PU4BqJ50gW2quDuPVjyWNZxToGUh/HwTZYJo= -github.com/multiformats/go-multiaddr-net v0.0.1/go.mod h1:nw6HSxNmCIQH27XPGBuX+d1tnvM7ihcFwHMSstNAVUU= -github.com/multiformats/go-multiaddr-net v0.1.0/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.1/go.mod h1:5JNbcfBOP4dnhoZOv10JJVkJO0pCCEf8mTnipAo2UZQ= -github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQKME1qHfpYmyIjFVsSOY6Y= -github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= 
-github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= -github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= -github.com/multiformats/go-multibase v0.0.3/go.mod h1:5+1R4eQrT3PkYZ24C3W2Ue2tPwIdYQD509ZjSb5y9Oc= github.com/multiformats/go-multibase v0.2.0 h1:isdYCVLvksgWlMW9OZRYJEa9pZETFivncJHmHnnd87g= github.com/multiformats/go-multibase v0.2.0/go.mod h1:bFBZX4lKCA/2lyOFSAoKH5SS6oPyjtnzK/XTFDPkNuk= github.com/multiformats/go-multicodec v0.9.0 h1:pb/dlPnzee/Sxv/j4PmkDRxCOi3hXTz3IbPKOXWJkmg= github.com/multiformats/go-multicodec v0.9.0/go.mod h1:L3QTQvMIaVBkXOXXtVmYE+LI16i14xuaojr/H7Ai54k= -github.com/multiformats/go-multihash v0.0.1/go.mod h1:w/5tugSrLEbWqlcgJabL3oHFKTwfvkofsjW2Qa1ct4U= -github.com/multiformats/go-multihash v0.0.5/go.mod h1:lt/HCbqlQwlPBz7lv0sQCdtfcMtlJvakRUn/0Ual8po= github.com/multiformats/go-multihash v0.0.8/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.10/go.mod h1:YSLudS+Pi8NHE7o6tb3D8vrpKa63epEDmG8nTduyAew= -github.com/multiformats/go-multihash v0.0.13/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.14/go.mod h1:VdAWLKTwram9oKAatUcLxBNUjdtcVwxObEQBtRfuyjc= -github.com/multiformats/go-multihash v0.0.15/go.mod h1:D6aZrWNLFTV/ynMpKsNtB40mJzmCl4jb1alC0OvHiHg= github.com/multiformats/go-multihash v0.2.3 h1:7Lyc8XfX/IY2jWb/gI7JP+o7JEq9hOa7BFvVU9RSh+U= github.com/multiformats/go-multihash v0.2.3/go.mod h1:dXgKXCXjBzdscBLk9JkjINiEsCKRVch90MdaGiKsvSM= -github.com/multiformats/go-multistream v0.0.1/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.0.4/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.0/go.mod h1:fJTiDfXJVmItycydCnNx4+wSzZ5NwG2FEVAI30fiovg= -github.com/multiformats/go-multistream v0.1.1/go.mod h1:KmHZ40hzVxiaiwlj3MEbYgK9JFk2/9UktWZAF54Du38= -github.com/multiformats/go-multistream v0.2.1/go.mod h1:5GZPQZbkWOLOn3J2y4Y99vVW7vOfsAflxARk3x14o6k= -github.com/multiformats/go-multistream v0.2.2/go.mod h1:UIcnm7Zuo8HKG+HkWgfQsGL+/MIEhyTqbODbIUwSXKs= -github.com/multiformats/go-multistream v0.4.1 h1:rFy0Iiyn3YT0asivDUIR05leAdwZq3de4741sbiSdfo= -github.com/multiformats/go-multistream v0.4.1/go.mod h1:Mz5eykRVAjJWckE2U78c6xqdtyNUEhKSM0Lwar2p77Q= +github.com/multiformats/go-multistream v0.5.0 h1:5htLSLl7lvJk3xx3qT/8Zm9J4K8vEOf/QGkvOGQAyiE= +github.com/multiformats/go-multistream v0.5.0/go.mod h1:n6tMZiwiP2wUsR8DgfDWw1dydlEqV3l6N3/GBsX6ILA= github.com/multiformats/go-varint v0.0.1/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.2/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.5/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= -github.com/multiformats/go-varint v0.0.6/go.mod h1:3Ls8CIEsrijN6+B7PbrXRPxHRPuXSrVKRY101jdMZYE= github.com/multiformats/go-varint v0.0.7 h1:sWSGR+f/eu5ABZA2ZpYKBILXTTs9JWpdEM/nEGOHFS8= github.com/multiformats/go-varint v0.0.7/go.mod h1:r8PUYw/fD/SjBCiKOoDlGF6QawOELpZAu9eioSos/OU= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack 
v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= -github.com/nats-io/jwt v0.3.2/go.mod h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= -github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= -github.com/nats-io/nats.go v1.9.1/go.mod h1:ZjDU1L/7fJ09jvUSRVBR2e7+RnLiiIQyqyzEE/Zbp4w= -github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= -github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= -github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= -github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= -github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= -github.com/onsi/ginkgo/v2 v2.11.0 h1:WgqUCUt/lT6yXoQ8Wef0fsNn5cAuMK7+KT9UFRz2tcU= -github.com/onsi/ginkgo/v2 v2.11.0/go.mod h1:ZhrRA5XmEE3x3rhlzamx/JJvujdZoJ2uvgI7kR0iZvM= -github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= -github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.27.8 h1:gegWiwZjBsf2DgiSbf5hpokZ98JVDMcWkUiigk6/KXc= -github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= -github.com/opencontainers/runtime-spec v1.0.2 h1:UfAcuLBJB9Coz72x1hgl8O5RVzTdNiaglX6v2DM6FI0= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= -github.com/opentracing-contrib/go-observer 
v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= -github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= -github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/opencontainers/runtime-spec v1.1.0 h1:HHUyrt9mwHUjtasSbXSMvs4cyFxh+Bll4AjJ9odEGpg= +github.com/opencontainers/runtime-spec v1.1.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= -github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= -github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= -github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ= -github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4= -github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= -github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= +github.com/pelletier/go-toml/v2 v2.1.0/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc= +github.com/perimeterx/marshmallow v1.1.5 h1:a2LALqQ1BlHM8PZblsDdidgv1mWi1DgC2UmX50IvK2s= +github.com/perimeterx/marshmallow v1.1.5/go.mod h1:dsXbUu8CRzfYP5a87xpp0xq9S3u0Vchtcl8we9tYaXw= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg= -github.com/planetscale/vtprotobuf v0.5.0 h1:l8PXm6Colok5z6qQLNhAj2Jq5BfoMTIHxLER5a6nDqM= -github.com/planetscale/vtprotobuf v0.5.0/go.mod h1:wm1N3qk9G/4+VM1WhpkLbvY/d8+0PbwYYpP5P5VhTks= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/polydawn/refmt v0.0.0-20190221155625-df39d6c2d992/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20190408063855-01bf1e26dd14/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= -github.com/polydawn/refmt v0.0.0-20201211092308-30ac6d18308e/go.mod h1:uIp+gprXxxrWSjjklXD+mN4wed/tMfjMMmN/9+JsA9o= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/polydawn/refmt v0.89.0 h1:ADJTApkvkeBZsN0tBTx8QjpD9JkmxbKp0cxfr9qszm4= github.com/polydawn/refmt v0.89.0/go.mod h1:/zvteZs/GwLtCgZ4BL6CBsk9IKIlexP43ObX9AxTqTw= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q= +github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= -github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= +github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= +github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common 
v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= -github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY= +github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= -github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= +github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= +github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-19 v0.3.3 h1:wznEHvJwd+2X3PqftRha0SUKmGsnb6dfArMhy9PeJVE= -github.com/quic-go/qtls-go1-19 v0.3.3/go.mod h1:ySOI96ew8lnoKPtSqx2BlI5wCpUVPT05RMAlajtnyOI= -github.com/quic-go/qtls-go1-20 v0.2.3 h1:m575dovXn1y2ATOb1XrRFcrv0F+EQmlowTkoraNkDPI= -github.com/quic-go/qtls-go1-20 v0.2.3/go.mod h1:JKtK6mjbAVcUTN/9jZpvLbGxvdWIKS8uT7EiStoU1SM= -github.com/quic-go/quic-go v0.36.4 h1:CXn/ZLN5Vntlk53fjR+kUMC8Jt7flfQe+I5Ty5A+k0o= -github.com/quic-go/quic-go v0.36.4/go.mod h1:qxQumdeKw5GmWs1OsTZZnOxzSI+RJWuhf1O8FN35L2o= -github.com/quic-go/webtransport-go v0.5.3 h1:5XMlzemqB4qmOlgIus5zB45AcZ2kCgCy2EptUrfOPWU= -github.com/quic-go/webtransport-go v0.5.3/go.mod h1:OhmmgJIzTTqXK5xvtuX0oBpLV2GkLWNDA+UeTGJXErU= +github.com/quic-go/qtls-go1-20 v0.3.4 h1:MfFAPULvst4yoMgY9QmtpYmfij/em7O8UUi+bNVm7Cg= +github.com/quic-go/qtls-go1-20 v0.3.4/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.39.3 h1:o3YB6t2SR+HU/pgwF29kJ6g4jJIJEwEZ8CKia1h1TKg= +github.com/quic-go/quic-go v0.39.3/go.mod h1:T09QsDQWjLiQ74ZmacDfqZmhY/NLnw5BC40MANNNZ1Q= +github.com/quic-go/webtransport-go v0.6.0 h1:CvNsKqc4W2HljHJnoT+rMmbRJybShZ0YPFDD3NxaZLY= +github.com/quic-go/webtransport-go v0.6.0/go.mod h1:9KjU4AEBqEQidGHNDkZrb8CAa1abRaosM2yGOyiikEc= github.com/raulk/go-watchdog v1.3.0 h1:oUmdlHxdkXRJlwfG0O9omj8ukerm8MEQavSiDTEtBsk= github.com/raulk/go-watchdog v1.3.0/go.mod h1:fIvOnLbF0b0ZwkB9YU4mOW9Did//4vPZtDqv66NfsMU= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= 
-github.com/rogpeppe/go-internal v1.9.0 h1:73kH8U+JUqXU8lRuOHeVHaa/SZPifC7BkcraZVejAe8= +github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= -github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sagikazarmark/locafero v0.3.0 h1:zT7VEGWC2DTflmccN/5T1etyKvxSxpHsjb9cJvm4SvQ= +github.com/sagikazarmark/locafero v0.3.0/go.mod h1:w+v7UsPNFwzF1cHuOajOOzoq4U7v/ig1mpRjqV+Bu1U= +github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= +github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= @@ -1177,63 +552,36 @@ github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go. github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4= github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= -github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= github.com/smartystreets/assertions v1.2.0 h1:42S6lae5dvLc7BrLu/0ugRtcFVjoJNMC/N3yZFZkDFs= github.com/smartystreets/assertions v1.2.0/go.mod h1:tcbTF8ujkAEcZ8TElKY+i30BzYlVhC/LOxJk7iOWnoo= -github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.7.2 h1:9RBaZCeXEQ3UselpuwUQHltGVXvdwm6cv1hgR6gDIPg= github.com/smartystreets/goconvey v1.7.2/go.mod h1:Vw0tHAZW6lzCRk3xgdin6fKYcG+G3Pg9vgXWeJpQFMM= -github.com/smola/gocompat v0.2.0/go.mod h1:1B0MlxbmoZNo3h8guHp8HztB3BSYR5itql9qtVc0ypY= -github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= 
-github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE= +github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= +github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA= -github.com/sourcenetwork/badger/v4 v4.0.0-20230801145501-d3a57bd4c2ec h1:br39/Te7XrQmirI+QtT6YblhD9T6B2dzDNI9eoI26Pg= -github.com/sourcenetwork/badger/v4 v4.0.0-20230801145501-d3a57bd4c2ec/go.mod h1:qfCqhPoWDFJRx1gp5QwwyGo8xk1lbHUxvK9nK0OGAak= -github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.0-20230209220544-e16d5e34c4fc h1:ZrkklfmN27eENGFmzblEcs35PJ02hmlNgMHE8XJqFAo= -github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.0-20230209220544-e16d5e34c4fc/go.mod h1:3rOV6TxePSwADKpnwXBKpTjAA4QyjZBus13xc6VCtSw= -github.com/sourcenetwork/graphql-go v0.7.10-0.20230511091704-fe7085512c23 h1:QcSWSYlE1alUC0uOO/trppYMLpR8OuFIL8IqR+PR5sA= -github.com/sourcenetwork/graphql-go v0.7.10-0.20230511091704-fe7085512c23/go.mod h1:3Ty9EMes+aoxl8xS0CsuCGQZ4JEsOlC5yqQDLOKoBRw= +github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276 h1:TpQDDPfucDgCNH0NVqVUk6SSq6T6G8p9HIocmwZh9Tg= +github.com/sourcenetwork/badger/v4 v4.2.1-0.20231113215945-a63444ca5276/go.mod h1:lxiZTDBw0vheFMqSwX2OvB6RTDI1+/UtVCSU4rpThFM= +github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13 h1:d/PeGZutd5NcDr6ltAv8ubN5PxsHMp1YUnhHY/QCWB4= +github.com/sourcenetwork/go-libp2p-pubsub-rpc v0.0.13/go.mod h1:jUoQv592uUX1u7QBjAY4C+l24X9ArhPfifOqXpDHz4U= +github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd h1:lmpW39/8wPJ0khWRhOcj7Bj0HYKbSmQ8rXMJw1cMB8U= +github.com/sourcenetwork/graphql-go v0.7.10-0.20231113214537-a9560c1898dd/go.mod h1:rkahXkgRH/3vZErN1Bx+qt1+w+CV5fgaJyKKWgISe4U= github.com/sourcenetwork/immutable v0.3.0 h1:gHPtGvLrTBTK5YpDAhMU+u+S8v1F6iYmc3nbZLryMdc= github.com/sourcenetwork/immutable v0.3.0/go.mod h1:GD7ceuh/HD7z6cdIwzKK2ctzgZ1qqYFJpsFp+8qYnbI= -github.com/spacemonkeygo/openssl v0.0.0-20181017203307-c2dcc5cca94a/go.mod h1:7AyxJNCJ7SBZ1MfVQCWD6Uqo2oubI2Eq2y2eqf+A5r0= -github.com/spacemonkeygo/spacelog v0.0.0-20180420211403-2296661a0572/go.mod h1:w0SWMsp6j9O/dk4/ZpIhL+3CkG8ofA2vuv7k+ltqUMc= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= -github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= -github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/afero v1.10.0 h1:EaGW2JJh15aKOejeuJ+wpFSHnbd7GE6Wvp3TsNhb6LY= +github.com/spf13/afero v1.10.0/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= github.com/spf13/cast v1.5.1/go.mod h1:b9PdjNptOpzXr7Rq1q9gJML/2cdGQAo69NKzQ10KN48= -github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.5/go.mod 
h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk= -github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= -github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= -github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.16.0 h1:rGGH0XDZhdUOryiDWjmIvUSWpbNqisK8Wk0Vyefw8hc= -github.com/spf13/viper v1.16.0/go.mod h1:yg78JgCJcbrQOvV9YLXgkLaZqUidkY9K+Dd1FofRzQg= -github.com/src-d/envconfig v1.0.0/go.mod h1:Q9YQZ7BKITldTBnoxsE5gOeB5y66RyPXeue/R4aaNBc= -github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= -github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= +github.com/spf13/viper v1.17.0 h1:I5txKw7MJasPL/BrfkbA0Jyo/oELqVmux4pR/UxOMfI= +github.com/spf13/viper v1.17.0/go.mod h1:BmMMMLQXSbcHK6KAOiFLz0l5JHrU89OdIRHvsk0+yVI= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= @@ -1246,13 +594,11 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/subosito/gotenv v1.4.2 h1:X1TuBLAMDFbaTAChgCBLu3DU3UPyELpnF2jjJ2cz/S8= -github.com/subosito/gotenv v1.4.2/go.mod h1:ayKnFf/c6rvx/2iiLrJUk1e6plDbT3edrFNGqEflhK0= +github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= +github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= -github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod 
h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA= github.com/tetratelabs/wazero v1.3.1 h1:rnb9FgOEQRLLR8tgoD1mfjNjMhFeWRUk+a4b4j/GpUM= github.com/tetratelabs/wazero v1.3.1/go.mod h1:wYx2gNRg8/WihJfSDxA1TIL8H+GkfLYm+bIfbblu9VQ= @@ -1260,14 +606,10 @@ github.com/textileio/go-datastore-extensions v1.0.1 h1:qIJGqJaigQ1wD4TdwS/hf73u0 github.com/textileio/go-ds-badger3 v0.1.0 h1:q0kBuBmAcRUR3ClMSYlyw0224XeuzjjGinU53Qz1uXI= github.com/textileio/go-log/v2 v2.1.3-gke-2 h1:YkMA5ua0Cf/X6CkbexInsoJ/HdaHQBlgiv9Yy9hddNM= github.com/textileio/go-log/v2 v2.1.3-gke-2/go.mod h1:DwACkjFS3kjZZR/4Spx3aPfSsciyslwUe5bxV8CEU2w= -github.com/tidwall/btree v1.6.0 h1:LDZfKfQIBHGHWSwckhXI0RPSXzlo+KYdjK7FWSqOzzg= -github.com/tidwall/btree v1.6.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= -github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/tidwall/btree v1.7.0 h1:L1fkJH/AuEh5zBnnBbmTwQ5Lt+bRJ5A8EWecslvo9iI= +github.com/tidwall/btree v1.7.0/go.mod h1:twD9XRA5jj9VUQGELzDO4HPQTNJsoWWfYEL+EUQ2cKY= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= -github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= -github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.10/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/valyala/fastjson v1.6.4 h1:uAUNq9Z6ymTgGhcm0UynUAB6tlbakBrz6CQFax3BXVQ= @@ -1277,123 +619,74 @@ github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMI github.com/vito/go-sse v1.0.0 h1:e6/iTrrvy8BRrOwJwmQmlndlil+TLdxXvHi55ZDzH6M= github.com/vito/go-sse v1.0.0/go.mod h1:2wkcaQ+jtlZ94Uve8gYZjFpL68luAjssTINA2hpgcZs= github.com/warpfork/go-testmark v0.12.1 h1:rMgCpJfwy1sJ50x0M0NgyphxYYPMOODIJHhsXyEHU0s= -github.com/warpfork/go-wish v0.0.0-20180510122957-5ad1f5abf436/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20190328234359-8b3e70f8e830/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/warpfork/go-wish v0.0.0-20200122115046-b9ea61034e4a/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0 h1:GDDkbFiaK8jsSDJfjId/PEGEShv6ugrt4kYsC5UIDaQ= github.com/warpfork/go-wish v0.0.0-20220906213052-39a1cc7a02d0/go.mod h1:x6AKhvSSexNrVSrViXSHUEbICjmGXhtgABaHIySUSGw= -github.com/whyrusleeping/cbor-gen v0.0.0-20200123233031-1cdf64d27158/go.mod h1:Xj/M2wWU+QdTdRbu/L/1dIZY8/Wb2K9pAhtroQuxJJI= -github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa h1:EyA027ZAkuaCLoxVX4r1TZMPy1d31fM6hbfQ4OU4I5o= -github.com/whyrusleeping/cbor-gen v0.0.0-20230126041949-52956bd4c9aa/go.mod h1:fgkXqYy7bV2cFeIEOkVTZS/WjXARfBqSH6Q2qHL33hQ= +github.com/whyrusleeping/base32 v0.0.0-20170828182744-c30ac30633cc h1:BCPnHtcboadS0DvysUuJXZ4lWVv5Bh5i7+tbIyi+ck4= github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f h1:jQa4QT2UP9WYv2nzyawpKMOCl+Z/jW7djv2/J50lj9E= -github.com/whyrusleeping/chunker v0.0.0-20181014151217-fe64bd25879f/go.mod h1:p9UJB6dDgdPgMJZs7UjUOdulKyRr9fqkS+6JKAInPy8= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1 
h1:EKhdznlJHPMoKr0XTrX+IlJs1LH3lyx2nfr1dOlZ79k= github.com/whyrusleeping/go-keyspace v0.0.0-20160322163242-5b898ac5add1/go.mod h1:8UvriyWtv5Q5EOgjHaSseUEdkQfvwFv1I/In/O2M9gc= -github.com/whyrusleeping/go-logging v0.0.0-20170515211332-0457bb6b88fc/go.mod h1:bopw91TMyo8J3tvftk8xmU2kPmlrt4nScJQZU2hE5EM= -github.com/whyrusleeping/go-logging v0.0.1/go.mod h1:lDPYj54zutzG1XYfHAhcc7oNXEburHQBn+Iqd4yS4vE= -github.com/whyrusleeping/go-notifier v0.0.0-20170827234753-097c5d47330f/go.mod h1:cZNvX9cFybI01GriPRMXDtczuvUhgbcYr9iCGaNlRv8= -github.com/whyrusleeping/mafmt v1.2.8/go.mod h1:faQJFPbLSxzD9xpA02ttW/tS9vZykNvXwGvqIpk20FA= -github.com/whyrusleeping/mdns v0.0.0-20180901202407-ef14215e6b30/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/mdns v0.0.0-20190826153040-b9b60ed33aa9/go.mod h1:j4l84WPFclQPj320J9gp0XwNKBb3U0zt5CBqjPp22G4= -github.com/whyrusleeping/multiaddr-filter v0.0.0-20160516205228-e903e4adabd7/go.mod h1:X2c0RVCI1eSUFI8eLcY3c0423ykwiUdxLJtkDvruhjI= -github.com/x-cray/logrus-prefixed-formatter v0.5.2/go.mod h1:2duySbKsL6M18s5GU7VPsoEPHyzalCE06qoARUCeBBE= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= -go.opencensus.io v0.22.1/go.mod h1:Ap50jQcDJrx6rB6VgeeFPtuPIf3wMRvRfrfYDO6+BmA= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/otel v1.18.0 h1:TgVozPGZ01nHyDZxK5WGPFB9QexeTMXEH7+tIClWfzs= -go.opentelemetry.io/otel v1.18.0/go.mod h1:9lWqYO0Db579XzVuCKFNPDl4s73Voa+zEck3wHaAYQI= -go.opentelemetry.io/otel/metric v1.18.0 h1:JwVzw94UYmbx3ej++CwLUQZxEODDj/pOuTCvzhtRrSQ= -go.opentelemetry.io/otel/metric v1.18.0/go.mod h1:nNSpsVDjWGfb7chbRLUNW+PBNdcSTHD4Uu5pfFMOI0k= 
-go.opentelemetry.io/otel/sdk v1.17.0 h1:FLN2X66Ke/k5Sg3V623Q7h7nt3cHXaW1FOvKKrW0IpE= -go.opentelemetry.io/otel/sdk v1.17.0/go.mod h1:U87sE0f5vQB7hwUoW98pW5Rz4ZDuCFBZFNUBlSgmDFQ= -go.opentelemetry.io/otel/sdk/metric v0.40.0 h1:qOM29YaGcxipWjL5FzpyZDpCYrDREvX0mVlmXdOjCHU= -go.opentelemetry.io/otel/sdk/metric v0.40.0/go.mod h1:dWxHtdzdJvg+ciJUKLTKwrMe5P6Dv3FyDbh8UkfgkVs= -go.opentelemetry.io/otel/trace v1.18.0 h1:NY+czwbHbmndxojTEKiSMHkG2ClNH2PwmcHrdo0JY10= -go.opentelemetry.io/otel/trace v1.18.0/go.mod h1:T2+SGJGuYZY3bjj5rgh/hN7KIrlpWC5nS8Mjvzckz+0= -go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.opentelemetry.io/otel v1.19.0 h1:MuS/TNf4/j4IXsZuJegVzI1cwut7Qc00344rgH7p8bs= +go.opentelemetry.io/otel v1.19.0/go.mod h1:i0QyjOq3UPoTzff0PJB2N66fb4S0+rSbSB15/oyH9fY= +go.opentelemetry.io/otel/metric v1.19.0 h1:aTzpGtV0ar9wlV4Sna9sdJyII5jTVJEvKETPiOKwvpE= +go.opentelemetry.io/otel/metric v1.19.0/go.mod h1:L5rUsV9kM1IxCj1MmSdS+JQAcVm319EUrDVLrt7jqt8= +go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o= +go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A= +go.opentelemetry.io/otel/sdk/metric v1.19.0 h1:EJoTO5qysMsYCa+w4UghwFV/ptQgqSL/8Ni+hx+8i1k= +go.opentelemetry.io/otel/sdk/metric v1.19.0/go.mod h1:XjG0jQyFJrv2PbMvwND7LwCEhsJzCzV5210euduKcKY= +go.opentelemetry.io/otel/trace v1.19.0 h1:DFVQmlVbfVeOuBRrwdtaehRrWiL1JoVs9CPIQ1Dzxpg= +go.opentelemetry.io/otel/trace v1.19.0/go.mod h1:mfaSyvGyEJEI0nyV2I4qhNQnbBOUUmYZpYojqMnX2vo= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE= -go.uber.org/dig v1.17.0 h1:5Chju+tUvcC+N7N6EV08BJz41UZuO3BmHcN4A287ZLI= -go.uber.org/dig v1.17.0/go.mod h1:rTxpf7l5I0eBTlE6/9RL+lDybC7WFwY2QH55ZSjy1mU= -go.uber.org/fx v1.20.0 h1:ZMC/pnRvhsthOZh9MZjMq5U8Or3mA9zBSPaLnzs3ihQ= -go.uber.org/fx v1.20.0/go.mod h1:qCUj0btiR3/JnanEr1TYEePfSw6o/4qYJscgvzQ5Ub0= -go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= +go.uber.org/dig v1.17.1 h1:Tga8Lz8PcYNsWsyHMZ1Vm0OQOUaJNDyvPImgbAu9YSc= +go.uber.org/dig v1.17.1/go.mod h1:Us0rSJiThwCv2GteUN0Q7OKvU7n5J4dxZ9JKUXozFdE= +go.uber.org/fx v1.20.1 h1:zVwVQGS8zYvhh9Xxcu4w1M6ESyeMzebzj2NbSayZ4Mk= +go.uber.org/fx v1.20.1/go.mod h1:iSYNbHf2y55acNCwCXKx7LbWb5WG1Bnue5RDXz1OREg= go.uber.org/goleak v1.1.11-0.20210813005559-691160354723/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk= -go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= +go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= go.uber.org/multierr v1.11.0/go.mod 
h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= -go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= +go.uber.org/zap v1.26.0 h1:sI7k6L95XOKS281NhVKOFCUNIvv9e0w4BF8N3u+tCRo= +go.uber.org/zap v1.26.0/go.mod h1:dtElttAiwGvoJ/vj4IwHBS/gXsEu/pZ50mUIRWuG0so= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= -golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190225124518-7f87c0fbb88b/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190618222545-ea8f1a30c443/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= 
-golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200602180216-279210d13fed/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.13.0 h1:mvySKfSWJ+UKUii46M40LOvyWfN0s2U+46/jDd0e6Ck= -golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc= +golang.org/x/crypto v0.15.0 h1:frVn1TEaCEaZcn3Tmd7Y2b5KKPaZ+I32Q2OA3kYp5TA= +golang.org/x/crypto v0.15.0/go.mod h1:4ChreQoLWfG3xLDer1WdlH5NdlQ3+mwnQq1YTKY+72g= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1404,8 +697,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 h1:MGwJjxBy0HJshjDNfLsYO8xppfqWlA5ZT9OhtUUhTNw= -golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa h1:FRnLl4eNAQl8hwxVVC17teOw8kdjVDVAiFMtgUdTSRQ= +golang.org/x/exp v0.0.0-20231110203233-9a3e6036ecaa/go.mod h1:zk2irFbV9DP96SEBUUAy67IdHUaZuSnrz1n472HUCLE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1431,37 +724,24 @@ golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= -golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180719180050-a680a1efc54d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= +golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod 
h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181011144130-49bb7cea24b1/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190227160552-c95aed5357e7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190611141213-3f473d35a33a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1484,12 +764,10 @@ golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod 
h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.18.0 h1:mIYleuAkSbHh0tCv7RvjL3F6ZVbLjq4+R7zbOn3Kokg= +golang.org/x/net v0.18.0/go.mod h1:/czyP5RqHAH4odGYxBJ1qz0+CE5WZ+2j1YgoEo8F2jQ= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1513,52 +791,29 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= -golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181026203630-95b1ffbd15a5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190219092855-153ac476189d/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190228124157-a34e9553db1e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190302025703-b6889370fb10/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190524122548-abf6ff778158/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190524152521-dbbf3f1254d4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190526052359-791d8a0f4d09/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190610200419-93c9922d18ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200124204421-9fbb57f87de9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1570,39 +825,30 @@ golang.org/x/sys 
v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200602225109-6fdc65e7d980/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210317225723-c4fcb01b228e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.12.0 h1:CM0HF96J0hcLAwsHPJZjfdNzs0gftsLfgKt57wWHJ0o= -golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/sys v0.14.0 h1:Vz7Qs629MkJkGyHxUlRHizWJRG2j8fbQKjELVSNhy7Q= +golang.org/x/sys v0.14.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1612,18 +858,16 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.13.0 h1:ablQoSUd0tRdKxZewP80B+BaqeKJuVhuRxj/dkrun3k= -golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181130052023-1c3d964395ce/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= @@ -1641,16 +885,13 @@ golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -1674,26 +915,23 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.11.0 h1:EMCa6U9S2LtZXLAMoWiR/R8dAQFRqbAitmbJ2UKhoi8= -golang.org/x/tools v0.11.0/go.mod h1:anzJrxPjNtfgiYQYirP2CPGzGLxrH2u2QBhn6Bf3qY8= +golang.org/x/tools v0.15.0 h1:zdAyfUGbYmuVokhzVmghFl2ZJh5QhcfebBgmVPFYA+8= +golang.org/x/tools v0.15.0/go.mod h1:hpksKq4dtpQWS1uQ61JkdqWM3LscIS6Slf+VVkm+wQk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk= -golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8= gonum.org/v1/gonum v0.13.0 h1:a0T3bh+7fhRyqeNbiC3qVHYmkiQgit3wnNan/2c0HMM= gonum.org/v1/gonum v0.13.0/go.mod h1:/WPYRckkfWrhWefxyYTfrTtQR0KH4iyHNuzxqXAKyAU= google.golang.org/api 
v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= @@ -1731,7 +969,6 @@ google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRn google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= @@ -1748,7 +985,6 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= @@ -1764,25 +1000,20 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc 
v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= -google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= -google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= @@ -1790,8 +1021,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.58.0 h1:32JY8YpPMSR45K+c3o6b8VL73V+rR8k+DeMIr4vRH8o= -google.golang.org/grpc v1.58.0/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -1806,35 +1037,26 @@ google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp0 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= 
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= -gopkg.in/src-d/go-cli.v0 v0.0.0-20181105080154-d492247bbc0d/go.mod h1:z+K8VcOYVYcSwSjGebuDL6176A1XskgbtNl64NSg+n8= -gopkg.in/src-d/go-log.v1 v1.0.1/go.mod h1:GN34hKP0g305ysm2/hctJ0Y8nWP3zxXXJ8GFabTyABE= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= @@ -1851,7 +1073,5 @@ lukechampine.com/blake3 v1.2.1/go.mod h1:0OFRp7fBtAylGVCO40o87sbupkyIGgbpv1+M1k1 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= -sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck= sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0= diff --git a/http/client.go b/http/client.go index 16a8924a65..148715e877 100644 --- a/http/client.go +++ b/http/client.go @@ -35,11 +35,10 @@ type Client struct { } func NewClient(rawURL string) (*Client, error) { - baseURL, err := url.Parse(rawURL) + httpClient, err := newHttpClient(rawURL) if err != nil { return nil, err } - httpClient := 
newHttpClient(baseURL.JoinPath("/api/v0")) return &Client{httpClient}, nil } @@ -88,10 +87,10 @@ func (c *Client) WithTxn(tx datastore.Txn) client.Store { return &Client{client} } -func (c *Client) SetReplicator(ctx context.Context, rep client.Replicator) error { - methodURL := c.http.baseURL.JoinPath("p2p", "replicators") +func (c *Client) BasicImport(ctx context.Context, filepath string) error { + methodURL := c.http.baseURL.JoinPath("backup", "import") - body, err := json.Marshal(rep) + body, err := json.Marshal(&client.BackupConfig{Filepath: filepath}) if err != nil { return err } @@ -103,50 +102,14 @@ func (c *Client) SetReplicator(ctx context.Context, rep client.Replicator) error return err } -func (c *Client) DeleteReplicator(ctx context.Context, rep client.Replicator) error { - methodURL := c.http.baseURL.JoinPath("p2p", "replicators") - - body, err := json.Marshal(rep) - if err != nil { - return err - } - req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body)) - if err != nil { - return err - } - _, err = c.http.request(req) - return err -} - -func (c *Client) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { - methodURL := c.http.baseURL.JoinPath("p2p", "replicators") - - req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) - if err != nil { - return nil, err - } - var reps []client.Replicator - if err := c.http.requestJson(req, &reps); err != nil { - return nil, err - } - return reps, nil -} - -func (c *Client) AddP2PCollection(ctx context.Context, collectionID string) error { - methodURL := c.http.baseURL.JoinPath("p2p", "collections", collectionID) +func (c *Client) BasicExport(ctx context.Context, config *client.BackupConfig) error { + methodURL := c.http.baseURL.JoinPath("backup", "export") - req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), nil) + body, err := json.Marshal(config) if err != nil { return err } - _, err = c.http.request(req) - return err -} - -func (c *Client) RemoveP2PCollection(ctx context.Context, collectionID string) error { - methodURL := c.http.baseURL.JoinPath("p2p", "collections", collectionID) - - req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) if err != nil { return err } @@ -154,43 +117,34 @@ func (c *Client) RemoveP2PCollection(ctx context.Context, collectionID string) e return err } -func (c *Client) GetAllP2PCollections(ctx context.Context) ([]string, error) { - methodURL := c.http.baseURL.JoinPath("p2p", "collections") +func (c *Client) AddSchema(ctx context.Context, schema string) ([]client.CollectionDescription, error) { + methodURL := c.http.baseURL.JoinPath("schema") - req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), strings.NewReader(schema)) if err != nil { return nil, err } - var cols []string + var cols []client.CollectionDescription if err := c.http.requestJson(req, &cols); err != nil { return nil, err } return cols, nil } -func (c *Client) BasicImport(ctx context.Context, filepath string) error { - methodURL := c.http.baseURL.JoinPath("backup", "import") - - body, err := json.Marshal(&client.BackupConfig{Filepath: filepath}) - if err != nil { - return err - } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), 
bytes.NewBuffer(body)) - if err != nil { - return err - } - _, err = c.http.request(req) - return err +type patchSchemaRequest struct { + Patch string + SetAsDefaultVersion bool } -func (c *Client) BasicExport(ctx context.Context, config *client.BackupConfig) error { - methodURL := c.http.baseURL.JoinPath("backup", "export") +func (c *Client) PatchSchema(ctx context.Context, patch string, setAsDefaultVersion bool) error { + methodURL := c.http.baseURL.JoinPath("schema") - body, err := json.Marshal(config) + body, err := json.Marshal(patchSchemaRequest{patch, setAsDefaultVersion}) if err != nil { return err } - req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) + + req, err := http.NewRequestWithContext(ctx, http.MethodPatch, methodURL.String(), bytes.NewBuffer(body)) if err != nil { return err } @@ -198,24 +152,10 @@ func (c *Client) BasicExport(ctx context.Context, config *client.BackupConfig) e return err } -func (c *Client) AddSchema(ctx context.Context, schema string) ([]client.CollectionDescription, error) { - methodURL := c.http.baseURL.JoinPath("schema") - - req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), strings.NewReader(schema)) - if err != nil { - return nil, err - } - var cols []client.CollectionDescription - if err := c.http.requestJson(req, &cols); err != nil { - return nil, err - } - return cols, nil -} - -func (c *Client) PatchSchema(ctx context.Context, patch string) error { - methodURL := c.http.baseURL.JoinPath("schema") +func (c *Client) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { + methodURL := c.http.baseURL.JoinPath("schema", "default") - req, err := http.NewRequestWithContext(ctx, http.MethodPatch, methodURL.String(), strings.NewReader(patch)) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), strings.NewReader(schemaVersionID)) if err != nil { return err } @@ -239,29 +179,33 @@ func (c *Client) GetCollectionByName(ctx context.Context, name client.Collection if err != nil { return nil, err } - var description client.CollectionDescription - if err := c.http.requestJson(req, &description); err != nil { + var definition client.CollectionDefinition + if err := c.http.requestJson(req, &definition); err != nil { return nil, err } - return &Collection{c.http, description}, nil + return &Collection{c.http, definition}, nil } -func (c *Client) GetCollectionBySchemaID(ctx context.Context, schemaId string) (client.Collection, error) { +func (c *Client) GetCollectionsBySchemaRoot(ctx context.Context, schemaRoot string) ([]client.Collection, error) { methodURL := c.http.baseURL.JoinPath("collections") - methodURL.RawQuery = url.Values{"schema_id": []string{schemaId}}.Encode() + methodURL.RawQuery = url.Values{"schema_root": []string{schemaRoot}}.Encode() req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) if err != nil { return nil, err } - var description client.CollectionDescription - if err := c.http.requestJson(req, &description); err != nil { + var descriptions []client.CollectionDefinition + if err := c.http.requestJson(req, &descriptions); err != nil { return nil, err } - return &Collection{c.http, description}, nil + collections := make([]client.Collection, len(descriptions)) + for i, d := range descriptions { + collections[i] = &Collection{c.http, d} + } + return collections, nil } -func (c *Client) GetCollectionByVersionID(ctx context.Context, versionId string) (client.Collection, error) 
{ +func (c *Client) GetCollectionsByVersionID(ctx context.Context, versionId string) ([]client.Collection, error) { methodURL := c.http.baseURL.JoinPath("collections") methodURL.RawQuery = url.Values{"version_id": []string{versionId}}.Encode() @@ -269,11 +213,15 @@ func (c *Client) GetCollectionByVersionID(ctx context.Context, versionId string) if err != nil { return nil, err } - var description client.CollectionDescription - if err := c.http.requestJson(req, &description); err != nil { + var descriptions []client.CollectionDefinition + if err := c.http.requestJson(req, &descriptions); err != nil { return nil, err } - return &Collection{c.http, description}, nil + collections := make([]client.Collection, len(descriptions)) + for i, d := range descriptions { + collections[i] = &Collection{c.http, d} + } + return collections, nil } func (c *Client) GetAllCollections(ctx context.Context) ([]client.Collection, error) { @@ -283,7 +231,7 @@ func (c *Client) GetAllCollections(ctx context.Context) ([]client.Collection, er if err != nil { return nil, err } - var descriptions []client.CollectionDescription + var descriptions []client.CollectionDefinition if err := c.http.requestJson(req, &descriptions); err != nil { return nil, err } @@ -294,6 +242,65 @@ func (c *Client) GetAllCollections(ctx context.Context) ([]client.Collection, er return collections, nil } +func (c *Client) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) { + methodURL := c.http.baseURL.JoinPath("schema") + methodURL.RawQuery = url.Values{"name": []string{name}}.Encode() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + if err != nil { + return nil, err + } + var schema []client.SchemaDescription + if err := c.http.requestJson(req, &schema); err != nil { + return nil, err + } + return schema, nil +} + +func (c *Client) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) { + methodURL := c.http.baseURL.JoinPath("schema") + methodURL.RawQuery = url.Values{"version_id": []string{versionID}}.Encode() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + if err != nil { + return client.SchemaDescription{}, err + } + var schema client.SchemaDescription + if err := c.http.requestJson(req, &schema); err != nil { + return client.SchemaDescription{}, err + } + return schema, nil +} + +func (c *Client) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) { + methodURL := c.http.baseURL.JoinPath("schema") + methodURL.RawQuery = url.Values{"root": []string{root}}.Encode() + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + if err != nil { + return nil, err + } + var schema []client.SchemaDescription + if err := c.http.requestJson(req, &schema); err != nil { + return nil, err + } + return schema, nil +} + +func (c *Client) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) { + methodURL := c.http.baseURL.JoinPath("schema") + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + if err != nil { + return nil, err + } + var schema []client.SchemaDescription + if err := c.http.requestJson(req, &schema); err != nil { + return nil, err + } + return schema, nil +} + func (c *Client) GetAllIndexes(ctx context.Context) (map[client.CollectionName][]client.IndexDescription, error) { methodURL := c.http.baseURL.JoinPath("indexes") @@ -397,7 +404,7 @@ func (c *Client) 
PrintDump(ctx context.Context) error { return err } -func (c *Client) Close(ctx context.Context) { +func (c *Client) Close() { // do nothing } @@ -409,6 +416,10 @@ func (c *Client) Blockstore() blockstore.Blockstore { panic("client side database") } +func (c *Client) Peerstore() datastore.DSBatching { + panic("client side database") +} + func (c *Client) Events() events.Events { panic("client side database") } diff --git a/http/client_collection.go b/http/client_collection.go index 16157a9f96..9f56594db7 100644 --- a/http/client_collection.go +++ b/http/client_collection.go @@ -32,35 +32,39 @@ var _ client.Collection = (*Collection)(nil) // Collection implements the client.Collection interface over HTTP. type Collection struct { http *httpClient - desc client.CollectionDescription + def client.CollectionDefinition } func (c *Collection) Description() client.CollectionDescription { - return c.desc + return c.def.Description } func (c *Collection) Name() string { - return c.desc.Name + return c.Description().Name } func (c *Collection) Schema() client.SchemaDescription { - return c.desc.Schema + return c.def.Schema } func (c *Collection) ID() uint32 { - return c.desc.ID + return c.Description().ID } -func (c *Collection) SchemaID() string { - return c.desc.Schema.SchemaID +func (c *Collection) SchemaRoot() string { + return c.Schema().Root +} + +func (c *Collection) Definition() client.CollectionDefinition { + return c.def } func (c *Collection) Create(ctx context.Context, doc *client.Document) error { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) // We must call this here, else the doc key on the given object will not match // that of the document saved in the database - err := doc.RemapAliasFieldsAndDockey(c.Description().Schema.Fields) + err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields) if err != nil { return err } @@ -82,18 +86,18 @@ func (c *Collection) Create(ctx context.Context, doc *client.Document) error { } func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) error { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) var docMapList []json.RawMessage for _, doc := range docs { // We must call this here, else the doc key on the given object will not match // that of the document saved in the database - err := doc.RemapAliasFieldsAndDockey(c.Description().Schema.Fields) + err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields) if err != nil { return err } - docMap, err := documentJSON(doc) + docMap, err := doc.ToJSONPatch() if err != nil { return err } @@ -118,9 +122,9 @@ func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) er } func (c *Collection) Update(ctx context.Context, doc *client.Document) error { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, doc.Key().String()) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, doc.Key().String()) - body, err := documentJSON(doc) + body, err := doc.ToJSONPatch() if err != nil { return err } @@ -148,7 +152,7 @@ func (c *Collection) Save(ctx context.Context, doc *client.Document) error { } func (c *Collection) Delete(ctx context.Context, docKey client.DocKey) (bool, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, docKey.String()) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, docKey.String()) req, err 
:= http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), nil) if err != nil { @@ -186,7 +190,7 @@ func (c *Collection) updateWith( ctx context.Context, request CollectionUpdateRequest, ) (*client.UpdateResult, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) body, err := json.Marshal(request) if err != nil { @@ -257,7 +261,7 @@ func (c *Collection) deleteWith( ctx context.Context, request CollectionDeleteRequest, ) (*client.DeleteResult, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) body, err := json.Marshal(request) if err != nil { @@ -302,7 +306,7 @@ func (c *Collection) Get(ctx context.Context, key client.DocKey, showDeleted boo query.Add("show_deleted", "true") } - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, key.String()) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, key.String()) methodURL.RawQuery = query.Encode() req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) @@ -313,18 +317,23 @@ func (c *Collection) Get(ctx context.Context, key client.DocKey, showDeleted boo if err := c.http.requestJson(req, &docMap); err != nil { return nil, err } - return client.NewDocFromMap(docMap) + doc, err := client.NewDocFromMap(docMap) + if err != nil { + return nil, err + } + doc.Clean() + return doc, nil } func (c *Collection) WithTxn(tx datastore.Txn) client.Collection { return &Collection{ http: c.http.withTxn(tx.ID()), - desc: c.desc, + def: c.def, } } func (c *Collection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name) req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) if err != nil { @@ -376,7 +385,7 @@ func (c *Collection) CreateIndex( ctx context.Context, indexDesc client.IndexDescription, ) (client.IndexDescription, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, "indexes") + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, "indexes") body, err := json.Marshal(&indexDesc) if err != nil { @@ -394,7 +403,7 @@ func (c *Collection) CreateIndex( } func (c *Collection) DropIndex(ctx context.Context, indexName string) error { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, "indexes", indexName) + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, "indexes", indexName) req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), nil) if err != nil { @@ -405,7 +414,7 @@ func (c *Collection) DropIndex(ctx context.Context, indexName string) error { } func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) { - methodURL := c.http.baseURL.JoinPath("collections", c.desc.Name, "indexes") + methodURL := c.http.baseURL.JoinPath("collections", c.Description().Name, "indexes") req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) if err != nil { @@ -415,5 +424,5 @@ func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, if err := c.http.requestJson(req, &indexes); err != nil { return nil, err } - return c.desc.Indexes, nil + return c.Description().Indexes, nil } diff --git a/http/client_p2p.go 
b/http/client_p2p.go new file mode 100644 index 0000000000..8d5f470f99 --- /dev/null +++ b/http/client_p2p.go @@ -0,0 +1,124 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "bytes" + "context" + "encoding/json" + "net/http" + + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/sourcenetwork/defradb/client" +) + +func (c *Client) PeerInfo() peer.AddrInfo { + methodURL := c.http.baseURL.JoinPath("p2p", "info") + + req, err := http.NewRequestWithContext(context.Background(), http.MethodGet, methodURL.String(), nil) + if err != nil { + return peer.AddrInfo{} + } + var res peer.AddrInfo + if err := c.http.requestJson(req, &res); err != nil { + return peer.AddrInfo{} + } + return res +} + +func (c *Client) SetReplicator(ctx context.Context, rep client.Replicator) error { + methodURL := c.http.baseURL.JoinPath("p2p", "replicators") + + body, err := json.Marshal(rep) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + _, err = c.http.request(req) + return err +} + +func (c *Client) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + methodURL := c.http.baseURL.JoinPath("p2p", "replicators") + + body, err := json.Marshal(rep) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + _, err = c.http.request(req) + return err +} + +func (c *Client) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { + methodURL := c.http.baseURL.JoinPath("p2p", "replicators") + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + if err != nil { + return nil, err + } + var reps []client.Replicator + if err := c.http.requestJson(req, &reps); err != nil { + return nil, err + } + return reps, nil +} + +func (c *Client) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + methodURL := c.http.baseURL.JoinPath("p2p", "collections") + + body, err := json.Marshal(collectionIDs) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodPost, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + _, err = c.http.request(req) + return err +} + +func (c *Client) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + methodURL := c.http.baseURL.JoinPath("p2p", "collections") + + body, err := json.Marshal(collectionIDs) + if err != nil { + return err + } + req, err := http.NewRequestWithContext(ctx, http.MethodDelete, methodURL.String(), bytes.NewBuffer(body)) + if err != nil { + return err + } + _, err = c.http.request(req) + return err +} + +func (c *Client) GetAllP2PCollections(ctx context.Context) ([]string, error) { + methodURL := c.http.baseURL.JoinPath("p2p", "collections") + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, methodURL.String(), nil) + if err != nil { + return nil, err + } + var cols []string + if err := c.http.requestJson(req, &cols); err != nil { + return nil, err + } + return cols, nil +} diff --git 
a/http/client_tx.go b/http/client_tx.go index 8df82007a6..f1f2830006 100644 --- a/http/client_tx.go +++ b/http/client_tx.go @@ -26,6 +26,14 @@ type Transaction struct { http *httpClient } +func NewTransaction(rawURL string, id uint64) (*Transaction, error) { + httpClient, err := newHttpClient(rawURL) + if err != nil { + return nil, err + } + return &Transaction{id, httpClient}, nil +} + func (c *Transaction) ID() uint64 { return c.id } @@ -75,6 +83,10 @@ func (c *Transaction) Headstore() datastore.DSReaderWriter { panic("client side transaction") } +func (c *Transaction) Peerstore() datastore.DSBatching { + panic("client side transaction") +} + func (c *Transaction) DAGstore() datastore.DAGStore { panic("client side transaction") } diff --git a/http/errors.go b/http/errors.go index c2808603cf..7e07053df5 100644 --- a/http/errors.go +++ b/http/errors.go @@ -15,22 +15,29 @@ import ( "errors" ) -const ( - errInvalidRequestBody = "invalid request body" - errDocKeyDoesNotMatch = "document key does not match" - errStreamingNotSupported = "streaming not supported" - errMigrationNotFound = "migration not found" - errMissingRequest = "missing request" - errInvalidTransactionId = "invalid transaction id" -) - +// Errors returnable from this package. +// +// This list is incomplete. Undefined errors may also be returned. +// Errors returned from this package may be tested against these errors with errors.Is. var ( - ErrInvalidRequestBody = errors.New(errInvalidRequestBody) - ErrDocKeyDoesNotMatch = errors.New(errDocKeyDoesNotMatch) - ErrStreamingNotSupported = errors.New(errStreamingNotSupported) - ErrMigrationNotFound = errors.New(errMigrationNotFound) - ErrMissingRequest = errors.New(errMissingRequest) - ErrInvalidTransactionId = errors.New(errInvalidTransactionId) + ErrNoListener = errors.New("cannot serve with no listener") + ErrSchema = errors.New("base must start with the http or https scheme") + ErrDatabaseNotAvailable = errors.New("no database available") + ErrFormNotSupported = errors.New("content type application/x-www-form-urlencoded not yet supported") + ErrBodyEmpty = errors.New("body cannot be empty") + ErrMissingGQLRequest = errors.New("missing GraphQL request") + ErrPeerIdUnavailable = errors.New("no PeerID available. P2P might be disabled") + ErrStreamingUnsupported = errors.New("streaming unsupported") + ErrNoEmail = errors.New("email address must be specified for tls with autocert") + ErrPayloadFormat = errors.New("invalid payload format") + ErrMissingNewKey = errors.New("missing _newKey for imported doc") + ErrInvalidRequestBody = errors.New("invalid request body") + ErrDocKeyDoesNotMatch = errors.New("document key does not match") + ErrStreamingNotSupported = errors.New("streaming not supported") + ErrMigrationNotFound = errors.New("migration not found") + ErrMissingRequest = errors.New("missing request") + ErrInvalidTransactionId = errors.New("invalid transaction id") + ErrP2PDisabled = errors.New("p2p network is disabled") ) type errorResponse struct { diff --git a/http/handler.go b/http/handler.go new file mode 100644 index 0000000000..1df8987964 --- /dev/null +++ b/http/handler.go @@ -0,0 +1,107 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "context" + "fmt" + "net/http" + "sync" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore" + + "github.com/go-chi/chi/v5" + "github.com/go-chi/chi/v5/middleware" +) + +// Version is the identifier for the current API version. +var Version string = "v0" + +// playgroundHandler is set when building with the playground build tag +var playgroundHandler http.Handler = http.HandlerFunc(http.NotFound) + +type Handler struct { + db client.DB + mux *chi.Mux + txs *sync.Map +} + +func NewHandler(db client.DB, opts ServerOptions) (*Handler, error) { + txs := &sync.Map{} + + tx_handler := &txHandler{} + store_handler := &storeHandler{} + collection_handler := &collectionHandler{} + p2p_handler := &p2pHandler{} + lens_handler := &lensHandler{} + ccip_handler := &ccipHandler{} + + router, err := NewRouter() + if err != nil { + return nil, err + } + + router.AddMiddleware( + ApiMiddleware(db, txs, opts), + TransactionMiddleware, + StoreMiddleware, + ) + + tx_handler.bindRoutes(router) + store_handler.bindRoutes(router) + p2p_handler.bindRoutes(router) + ccip_handler.bindRoutes(router) + + router.AddRouteGroup(func(r *Router) { + r.AddMiddleware(CollectionMiddleware) + collection_handler.bindRoutes(r) + }) + + router.AddRouteGroup(func(r *Router) { + r.AddMiddleware(LensMiddleware) + lens_handler.bindRoutes(r) + }) + + if err := router.Validate(context.Background()); err != nil { + return nil, err + } + + mux := chi.NewMux() + mux.Use( + middleware.RequestLogger(&logFormatter{}), + middleware.Recoverer, + CorsMiddleware(opts), + ) + mux.Mount("/api/"+Version, router) + mux.Get("/openapi.json", func(rw http.ResponseWriter, req *http.Request) { + responseJSON(rw, http.StatusOK, router.OpenAPI()) + }) + mux.Handle("/*", playgroundHandler) + + return &Handler{ + db: db, + mux: mux, + txs: txs, + }, nil +} + +func (h *Handler) Transaction(id uint64) (datastore.Txn, error) { + tx, ok := h.txs.Load(id) + if !ok { + return nil, fmt.Errorf("invalid transaction id") + } + return tx.(datastore.Txn), nil +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, req *http.Request) { + h.mux.ServeHTTP(w, req) +} diff --git a/http/handler_ccip.go b/http/handler_ccip.go new file mode 100644 index 0000000000..d2a9ad6783 --- /dev/null +++ b/http/handler_ccip.go @@ -0,0 +1,124 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "encoding/hex" + "encoding/json" + "net/http" + "strings" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/go-chi/chi/v5" + + "github.com/sourcenetwork/defradb/client" +) + +type ccipHandler struct{} + +type CCIPRequest struct { + Sender string `json:"sender"` + Data string `json:"data"` +} + +type CCIPResponse struct { + Data string `json:"data"` +} + +// ExecCCIP handles GraphQL over Cross Chain Interoperability Protocol requests. 
+func (c *ccipHandler) ExecCCIP(rw http.ResponseWriter, req *http.Request) { + store := req.Context().Value(storeContextKey).(client.Store) + + var ccipReq CCIPRequest + switch req.Method { + case http.MethodGet: + ccipReq.Sender = chi.URLParam(req, "sender") + ccipReq.Data = chi.URLParam(req, "data") + case http.MethodPost: + if err := requestJSON(req, &ccipReq); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + } + + data, err := hex.DecodeString(strings.TrimPrefix(ccipReq.Data, "0x")) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + var request GraphQLRequest + if err := json.Unmarshal(data, &request); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + + result := store.ExecRequest(req.Context(), request.Query) + if result.Pub != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrStreamingNotSupported}) + return + } + resultJSON, err := json.Marshal(result.GQL) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + resultHex := "0x" + hex.EncodeToString(resultJSON) + responseJSON(rw, http.StatusOK, CCIPResponse{Data: resultHex}) +} + +func (h *ccipHandler) bindRoutes(router *Router) { + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + ccipRequestSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/ccip_request", + } + ccipResponseSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/ccip_response", + } + + ccipRequest := openapi3.NewRequestBody(). + WithContent(openapi3.NewContentWithJSONSchemaRef(ccipRequestSchema)) + + ccipResponse := openapi3.NewResponse(). + WithDescription("GraphQL response"). + WithContent(openapi3.NewContentWithJSONSchemaRef(ccipResponseSchema)) + + ccipPost := openapi3.NewOperation() + ccipPost.Description = "CCIP POST endpoint" + ccipPost.OperationID = "ccip_post" + ccipPost.Tags = []string{"ccip"} + ccipPost.RequestBody = &openapi3.RequestBodyRef{ + Value: ccipRequest, + } + ccipPost.AddResponse(200, ccipResponse) + ccipPost.Responses["400"] = errorResponse + + dataPathParam := openapi3.NewPathParameter("data"). + WithDescription("Hex encoded request data"). + WithSchema(openapi3.NewStringSchema()) + + senderPathParam := openapi3.NewPathParameter("sender"). + WithDescription("Hex encoded sender address"). + WithSchema(openapi3.NewStringSchema()) + + ccipGet := openapi3.NewOperation() + ccipGet.Description = "CCIP GET endpoint" + ccipGet.OperationID = "ccip_get" + ccipGet.Tags = []string{"ccip"} + ccipGet.AddParameter(dataPathParam) + ccipGet.AddParameter(senderPathParam) + ccipGet.AddResponse(200, ccipResponse) + ccipGet.Responses["400"] = errorResponse + + router.AddRoute("/ccip/{sender}/{data}", http.MethodGet, ccipGet, h.ExecCCIP) + router.AddRoute("/ccip", http.MethodPost, ccipPost, h.ExecCCIP) +} diff --git a/http/handler_ccip_test.go b/http/handler_ccip_test.go new file mode 100644 index 0000000000..66ac173a54 --- /dev/null +++ b/http/handler_ccip_test.go @@ -0,0 +1,213 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
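Before the tests below, it may help to see the CCIP round trip from the client's side: the GraphQL request is JSON encoded, hex encoded, and 0x prefixed, which is exactly what ExecCCIP reverses above. A small sketch; the helper name and mirrored type are illustrative:

```go
package main

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
)

// graphQLRequest mirrors the GraphQLRequest wire type that ExecCCIP decodes.
type graphQLRequest struct {
	Query string `json:"query"`
}

// ccipGetURL builds the EIP-3668 style GET URL for a GraphQL query.
func ccipGetURL(base, sender, query string) (string, error) {
	payload, err := json.Marshal(graphQLRequest{Query: query})
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%s/api/v0/ccip/%s/0x%s", base, sender, hex.EncodeToString(payload)), nil
}
```

On the way back, CCIPResponse.Data carries the hex encoded JSON result, so clients reverse the same two steps, as the tests that follow demonstrate.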
+ +package http + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "path" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore/memory" + "github.com/sourcenetwork/defradb/db" +) + +func TestCCIPGet_WithValidData(t *testing.T) { + cdb := setupDatabase(t) + + gqlData, err := json.Marshal(&GraphQLRequest{ + Query: `query { + User { + name + } + }`, + }) + require.NoError(t, err) + + data := "0x" + hex.EncodeToString([]byte(gqlData)) + sender := "0x0000000000000000000000000000000000000000" + url := "http://localhost:9181/api/v0/ccip/" + path.Join(sender, data) + + req := httptest.NewRequest(http.MethodGet, url, nil) + rec := httptest.NewRecorder() + + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) + handler.ServeHTTP(rec, req) + + res := rec.Result() + require.NotNil(t, res.Body) + + resData, err := io.ReadAll(res.Body) + require.NoError(t, err) + + var ccipRes CCIPResponse + err = json.Unmarshal(resData, &ccipRes) + require.NoError(t, err) + + resHex, err := hex.DecodeString(strings.TrimPrefix(ccipRes.Data, "0x")) + require.NoError(t, err) + + assert.JSONEq(t, `{"data": [{"name": "bob"}]}`, string(resHex)) +} + +func TestCCIPGet_WithSubscription(t *testing.T) { + cdb := setupDatabase(t) + + gqlData, err := json.Marshal(&GraphQLRequest{ + Query: `subscription { + User { + name + } + }`, + }) + require.NoError(t, err) + + data := "0x" + hex.EncodeToString([]byte(gqlData)) + sender := "0x0000000000000000000000000000000000000000" + url := "http://localhost:9181/api/v0/ccip/" + path.Join(sender, data) + + req := httptest.NewRequest(http.MethodGet, url, nil) + rec := httptest.NewRecorder() + + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) + handler.ServeHTTP(rec, req) + + res := rec.Result() + assert.Equal(t, 400, res.StatusCode) +} + +func TestCCIPGet_WithInvalidData(t *testing.T) { + cdb := setupDatabase(t) + + data := "invalid_hex_data" + sender := "0x0000000000000000000000000000000000000000" + url := "http://localhost:9181/api/v0/ccip/" + path.Join(sender, data) + + req := httptest.NewRequest(http.MethodGet, url, nil) + rec := httptest.NewRecorder() + + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) + handler.ServeHTTP(rec, req) + + res := rec.Result() + assert.Equal(t, 400, res.StatusCode) +} + +func TestCCIPPost_WithValidData(t *testing.T) { + cdb := setupDatabase(t) + + gqlJSON, err := json.Marshal(&GraphQLRequest{ + Query: `query { + User { + name + } + }`, + }) + require.NoError(t, err) + + body, err := json.Marshal(&CCIPRequest{ + Data: "0x" + hex.EncodeToString([]byte(gqlJSON)), + Sender: "0x0000000000000000000000000000000000000000", + }) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body)) + rec := httptest.NewRecorder() + + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) + handler.ServeHTTP(rec, req) + + res := rec.Result() + require.NotNil(t, res.Body) + + resData, err := io.ReadAll(res.Body) + require.NoError(t, err) + + var ccipRes CCIPResponse + err = json.Unmarshal(resData, &ccipRes) + require.NoError(t, err) + + resHex, err := hex.DecodeString(strings.TrimPrefix(ccipRes.Data, "0x")) + require.NoError(t, err) + + assert.JSONEq(t, `{"data": [{"name": "bob"}]}`, string(resHex)) +} + +func 
TestCCIPPost_WithInvalidGraphQLRequest(t *testing.T) { + cdb := setupDatabase(t) + + body, err := json.Marshal(&CCIPRequest{ + Data: "0x" + hex.EncodeToString([]byte("invalid_graphql_request")), + Sender: "0x0000000000000000000000000000000000000000", + }) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", bytes.NewBuffer(body)) + rec := httptest.NewRecorder() + + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) + handler.ServeHTTP(rec, req) + + res := rec.Result() + assert.Equal(t, 400, res.StatusCode) +} + +func TestCCIPPost_WithInvalidBody(t *testing.T) { + cdb := setupDatabase(t) + + req := httptest.NewRequest(http.MethodPost, "http://localhost:9181/api/v0/ccip", nil) + rec := httptest.NewRecorder() + + handler, err := NewHandler(cdb, ServerOptions{}) + require.NoError(t, err) + handler.ServeHTTP(rec, req) + + res := rec.Result() + assert.Equal(t, 400, res.StatusCode) +} + +func setupDatabase(t *testing.T) client.DB { + ctx := context.Background() + + cdb, err := db.NewDB(ctx, memory.NewDatastore(ctx), db.WithUpdateEvents()) + require.NoError(t, err) + + _, err = cdb.AddSchema(ctx, `type User { + name: String + }`) + require.NoError(t, err) + + col, err := cdb.GetCollectionByName(ctx, "User") + require.NoError(t, err) + + doc, err := client.NewDocFromJSON([]byte(`{"name": "bob"}`)) + require.NoError(t, err) + + err = col.Create(ctx, doc) + require.NoError(t, err) + + return cdb +} diff --git a/http/handler_collection.go b/http/handler_collection.go index 8f8ff8423b..a5622f1336 100644 --- a/http/handler_collection.go +++ b/http/handler_collection.go @@ -17,6 +17,7 @@ import ( "net/http" "strconv" + "github.com/getkin/kin-openapi/openapi3" "github.com/go-chi/chi/v5" "github.com/sourcenetwork/defradb/client" @@ -47,9 +48,14 @@ func (s *collectionHandler) Create(rw http.ResponseWriter, req *http.Request) { } switch t := body.(type) { - case []map[string]any: + case []any: var docList []*client.Document - for _, docMap := range t { + for _, v := range t { + docMap, ok := v.(map[string]any) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrInvalidRequestBody}) + return + } doc, err := client.NewDocFromMap(docMap) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) @@ -326,3 +332,202 @@ func (s *collectionHandler) DropIndex(rw http.ResponseWriter, req *http.Request) } rw.WriteHeader(http.StatusOK) } + +func (h *collectionHandler) bindRoutes(router *Router) { + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + collectionUpdateSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/collection_update", + } + updateResultSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/update_result", + } + collectionDeleteSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/collection_delete", + } + deleteResultSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/delete_result", + } + documentSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/document", + } + indexSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/index", + } + + collectionNamePathParam := openapi3.NewPathParameter("name"). + WithDescription("Collection name"). + WithRequired(true). 
+ WithSchema(openapi3.NewStringSchema()) + + documentArraySchema := openapi3.NewArraySchema() + documentArraySchema.Items = documentSchema + + collectionCreateSchema := openapi3.NewOneOfSchema() + collectionCreateSchema.OneOf = openapi3.SchemaRefs{ + documentSchema, + openapi3.NewSchemaRef("", documentArraySchema), + } + + collectionCreateRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchema(collectionCreateSchema)) + + collectionCreate := openapi3.NewOperation() + collectionCreate.OperationID = "collection_create" + collectionCreate.Description = "Create document(s) in a collection" + collectionCreate.Tags = []string{"collection"} + collectionCreate.AddParameter(collectionNamePathParam) + collectionCreate.RequestBody = &openapi3.RequestBodyRef{ + Value: collectionCreateRequest, + } + collectionCreate.Responses = make(openapi3.Responses) + collectionCreate.Responses["200"] = successResponse + collectionCreate.Responses["400"] = errorResponse + + collectionUpdateWithRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchemaRef(collectionUpdateSchema)) + + collectionUpdateWithResponse := openapi3.NewResponse(). + WithDescription("Update results"). + WithJSONSchemaRef(updateResultSchema) + + collectionUpdateWith := openapi3.NewOperation() + collectionUpdateWith.OperationID = "collection_update_with" + collectionUpdateWith.Description = "Update document(s) in a collection" + collectionUpdateWith.Tags = []string{"collection"} + collectionUpdateWith.AddParameter(collectionNamePathParam) + collectionUpdateWith.RequestBody = &openapi3.RequestBodyRef{ + Value: collectionUpdateWithRequest, + } + collectionUpdateWith.AddResponse(200, collectionUpdateWithResponse) + collectionUpdateWith.Responses["400"] = errorResponse + + collectionDeleteWithRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchemaRef(collectionDeleteSchema)) + + collectionDeleteWithResponse := openapi3.NewResponse(). + WithDescription("Delete results"). + WithJSONSchemaRef(deleteResultSchema) + + collectionDeleteWith := openapi3.NewOperation() + collectionDeleteWith.OperationID = "collections_delete_with" + collectionDeleteWith.Description = "Delete document(s) from a collection" + collectionDeleteWith.Tags = []string{"collection"} + collectionDeleteWith.AddParameter(collectionNamePathParam) + collectionDeleteWith.RequestBody = &openapi3.RequestBodyRef{ + Value: collectionDeleteWithRequest, + } + collectionDeleteWith.AddResponse(200, collectionDeleteWithResponse) + collectionDeleteWith.Responses["400"] = errorResponse + + createIndexRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchemaRef(indexSchema)) + createIndexResponse := openapi3.NewResponse(). + WithDescription("Index description"). + WithJSONSchemaRef(indexSchema) + + createIndex := openapi3.NewOperation() + createIndex.OperationID = "index_create" + createIndex.Description = "Create a secondary index" + createIndex.Tags = []string{"index"} + createIndex.AddParameter(collectionNamePathParam) + createIndex.RequestBody = &openapi3.RequestBodyRef{ + Value: createIndexRequest, + } + createIndex.AddResponse(200, createIndexResponse) + createIndex.Responses["400"] = errorResponse + + indexArraySchema := openapi3.NewArraySchema() + indexArraySchema.Items = indexSchema + + getIndexesResponse := openapi3.NewResponse(). + WithDescription("List of indexes"). 
+ WithJSONSchema(indexArraySchema) + + getIndexes := openapi3.NewOperation() + getIndexes.OperationID = "index_list" + getIndexes.Description = "List secondary indexes" + getIndexes.Tags = []string{"index"} + getIndexes.AddParameter(collectionNamePathParam) + getIndexes.AddResponse(200, getIndexesResponse) + getIndexes.Responses["400"] = errorResponse + + indexPathParam := openapi3.NewPathParameter("index"). + WithRequired(true). + WithSchema(openapi3.NewStringSchema()) + + dropIndex := openapi3.NewOperation() + dropIndex.OperationID = "index_drop" + dropIndex.Description = "Delete a secondary index" + dropIndex.Tags = []string{"index"} + dropIndex.AddParameter(collectionNamePathParam) + dropIndex.AddParameter(indexPathParam) + dropIndex.Responses = make(openapi3.Responses) + dropIndex.Responses["200"] = successResponse + dropIndex.Responses["400"] = errorResponse + + documentKeyPathParam := openapi3.NewPathParameter("key"). + WithRequired(true). + WithSchema(openapi3.NewStringSchema()) + + collectionGetResponse := openapi3.NewResponse(). + WithDescription("Document value"). + WithJSONSchemaRef(documentSchema) + + collectionGet := openapi3.NewOperation() + collectionGet.Description = "Get a document by key" + collectionGet.OperationID = "collection_get" + collectionGet.Tags = []string{"collection"} + collectionGet.AddParameter(collectionNamePathParam) + collectionGet.AddParameter(documentKeyPathParam) + collectionGet.AddResponse(200, collectionGetResponse) + collectionGet.Responses["400"] = errorResponse + + collectionUpdate := openapi3.NewOperation() + collectionUpdate.Description = "Update a document by key" + collectionUpdate.OperationID = "collection_update" + collectionUpdate.Tags = []string{"collection"} + collectionUpdate.AddParameter(collectionNamePathParam) + collectionUpdate.AddParameter(documentKeyPathParam) + collectionUpdate.Responses = make(openapi3.Responses) + collectionUpdate.Responses["200"] = successResponse + collectionUpdate.Responses["400"] = errorResponse + + collectionDelete := openapi3.NewOperation() + collectionDelete.Description = "Delete a document by key" + collectionDelete.OperationID = "collection_delete" + collectionDelete.Tags = []string{"collection"} + collectionDelete.AddParameter(collectionNamePathParam) + collectionDelete.AddParameter(documentKeyPathParam) + collectionDelete.Responses = make(openapi3.Responses) + collectionDelete.Responses["200"] = successResponse + collectionDelete.Responses["400"] = errorResponse + + collectionKeys := openapi3.NewOperation() + collectionKeys.AddParameter(collectionNamePathParam) + collectionKeys.Description = "Get all document keys" + collectionKeys.OperationID = "collection_keys" + collectionKeys.Tags = []string{"collection"} + collectionKeys.Responses = make(openapi3.Responses) + collectionKeys.Responses["200"] = successResponse + collectionKeys.Responses["400"] = errorResponse + + router.AddRoute("/collections/{name}", http.MethodGet, collectionKeys, h.GetAllDocKeys) + router.AddRoute("/collections/{name}", http.MethodPost, collectionCreate, h.Create) + router.AddRoute("/collections/{name}", http.MethodPatch, collectionUpdateWith, h.UpdateWith) + router.AddRoute("/collections/{name}", http.MethodDelete, collectionDeleteWith, h.DeleteWith) + router.AddRoute("/collections/{name}/indexes", http.MethodPost, createIndex, h.CreateIndex) + router.AddRoute("/collections/{name}/indexes", http.MethodGet, getIndexes, h.GetIndexes) + router.AddRoute("/collections/{name}/indexes/{index}", http.MethodDelete, dropIndex, 
h.DropIndex) + router.AddRoute("/collections/{name}/{key}", http.MethodGet, collectionGet, h.Get) + router.AddRoute("/collections/{name}/{key}", http.MethodPatch, collectionUpdate, h.Update) + router.AddRoute("/collections/{name}/{key}", http.MethodDelete, collectionDelete, h.Delete) +} diff --git a/http/handler_lens.go b/http/handler_lens.go index ccf8dd01a8..a06a4d09f1 100644 --- a/http/handler_lens.go +++ b/http/handler_lens.go @@ -13,6 +13,7 @@ package http import ( "net/http" + "github.com/getkin/kin-openapi/openapi3" "github.com/go-chi/chi/v5" "github.com/sourcenetwork/immutable/enumerable" @@ -61,7 +62,15 @@ func (s *lensHandler) MigrateUp(rw http.ResponseWriter, req *http.Request) { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, result) + var value []map[string]any + err = enumerable.ForEach(result, func(item map[string]any) { + value = append(value, item) + }) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, value) } func (s *lensHandler) MigrateDown(rw http.ResponseWriter, req *http.Request) { @@ -77,7 +86,15 @@ func (s *lensHandler) MigrateDown(rw http.ResponseWriter, req *http.Request) { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, result) + var value []map[string]any + err = enumerable.ForEach(result, func(item map[string]any) { + value = append(value, item) + }) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, value) } func (s *lensHandler) Config(rw http.ResponseWriter, req *http.Request) { @@ -105,3 +122,103 @@ func (s *lensHandler) HasMigration(rw http.ResponseWriter, req *http.Request) { } rw.WriteHeader(http.StatusOK) } + +func (h *lensHandler) bindRoutes(router *Router) { + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + documentSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/document", + } + + lensConfigSchema := openapi3.NewSchemaRef("#/components/schemas/lens_config", nil) + lensConfigArraySchema := openapi3.NewArraySchema() + lensConfigArraySchema.Items = lensConfigSchema + + lensConfigResponse := openapi3.NewResponse(). + WithDescription("Lens configurations"). + WithJSONSchema(lensConfigArraySchema) + + lensConfig := openapi3.NewOperation() + lensConfig.OperationID = "lens_config" + lensConfig.Description = "List lens migrations" + lensConfig.Tags = []string{"lens"} + lensConfig.AddResponse(200, lensConfigResponse) + lensConfig.Responses["400"] = errorResponse + + setMigrationRequest := openapi3.NewRequestBody(). + WithRequired(true). 
+ WithJSONSchemaRef(lensConfigSchema) + + setMigration := openapi3.NewOperation() + setMigration.OperationID = "lens_set_migration" + setMigration.Description = "Add a new lens migration" + setMigration.Tags = []string{"lens"} + setMigration.RequestBody = &openapi3.RequestBodyRef{ + Value: setMigrationRequest, + } + setMigration.Responses = make(openapi3.Responses) + setMigration.Responses["200"] = successResponse + setMigration.Responses["400"] = errorResponse + + reloadLenses := openapi3.NewOperation() + reloadLenses.OperationID = "lens_reload" + reloadLenses.Description = "Reload lens migrations" + reloadLenses.Tags = []string{"lens"} + reloadLenses.Responses = make(openapi3.Responses) + reloadLenses.Responses["200"] = successResponse + reloadLenses.Responses["400"] = errorResponse + + versionPathParam := openapi3.NewPathParameter("version"). + WithRequired(true). + WithSchema(openapi3.NewStringSchema()) + + hasMigration := openapi3.NewOperation() + hasMigration.OperationID = "lens_has_migration" + hasMigration.Description = "Check if a migration exists" + hasMigration.Tags = []string{"lens"} + hasMigration.AddParameter(versionPathParam) + hasMigration.Responses = make(openapi3.Responses) + hasMigration.Responses["200"] = successResponse + hasMigration.Responses["400"] = errorResponse + + migrateSchema := openapi3.NewArraySchema() + migrateSchema.Items = documentSchema + migrateRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchema(migrateSchema)) + + migrateUp := openapi3.NewOperation() + migrateUp.OperationID = "lens_migrate_up" + migrateUp.Description = "Migrate documents to a schema version" + migrateUp.Tags = []string{"lens"} + migrateUp.RequestBody = &openapi3.RequestBodyRef{ + Value: migrateRequest, + } + migrateUp.AddParameter(versionPathParam) + migrateUp.Responses = make(openapi3.Responses) + migrateUp.Responses["200"] = successResponse + migrateUp.Responses["400"] = errorResponse + + migrateDown := openapi3.NewOperation() + migrateDown.OperationID = "lens_migrate_down" + migrateDown.Description = "Migrate documents from a schema version" + migrateDown.Tags = []string{"lens"} + migrateDown.RequestBody = &openapi3.RequestBodyRef{ + Value: migrateRequest, + } + migrateDown.AddParameter(versionPathParam) + migrateDown.Responses = make(openapi3.Responses) + migrateDown.Responses["200"] = successResponse + migrateDown.Responses["400"] = errorResponse + + router.AddRoute("/lens", http.MethodGet, lensConfig, h.Config) + router.AddRoute("/lens", http.MethodPost, setMigration, h.SetMigration) + router.AddRoute("/lens/reload", http.MethodPost, reloadLenses, h.ReloadLenses) + router.AddRoute("/lens/{version}", http.MethodGet, hasMigration, h.HasMigration) + router.AddRoute("/lens/{version}/up", http.MethodPost, migrateUp, h.MigrateUp) + router.AddRoute("/lens/{version}/down", http.MethodPost, migrateDown, h.MigrateDown) +} diff --git a/http/handler_p2p.go b/http/handler_p2p.go new file mode 100644 index 0000000000..73727ec297 --- /dev/null +++ b/http/handler_p2p.go @@ -0,0 +1,252 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
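A note on the MigrateUp and MigrateDown changes above: an enumerable result cannot be handed to responseJSON directly, so both handlers now drain it into a concrete slice first. The same pattern, extracted into a sketch; drainToSlice is illustrative, and the generic Enumerable signature is assumed from the sourcenetwork/immutable package the lens handler already imports:

```go
package http

import (
	"github.com/sourcenetwork/immutable/enumerable"
)

// drainToSlice consumes an enumerable into a slice so the result can
// be JSON encoded, mirroring the MigrateUp/MigrateDown handlers.
func drainToSlice(result enumerable.Enumerable[map[string]any]) ([]map[string]any, error) {
	var value []map[string]any
	err := enumerable.ForEach(result, func(item map[string]any) {
		value = append(value, item)
	})
	return value, err
}
```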
+ +package http + +import ( + "net/http" + + "github.com/getkin/kin-openapi/openapi3" + + "github.com/sourcenetwork/defradb/client" +) + +type p2pHandler struct{} + +func (s *p2pHandler) PeerInfo(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + responseJSON(rw, http.StatusOK, p2p.PeerInfo()) +} + +func (s *p2pHandler) SetReplicator(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + var rep client.Replicator + if err := requestJSON(req, &rep); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := p2p.SetReplicator(req.Context(), rep) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + +func (s *p2pHandler) DeleteReplicator(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + var rep client.Replicator + if err := requestJSON(req, &rep); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := p2p.DeleteReplicator(req.Context(), rep) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + +func (s *p2pHandler) GetAllReplicators(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + reps, err := p2p.GetAllReplicators(req.Context()) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, reps) +} + +func (s *p2pHandler) AddP2PCollection(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + var collectionIDs []string + if err := requestJSON(req, &collectionIDs); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := p2p.AddP2PCollections(req.Context(), collectionIDs) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + +func (s *p2pHandler) RemoveP2PCollection(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + var collectionIDs []string + if err := requestJSON(req, &collectionIDs); err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + err := p2p.RemoveP2PCollections(req.Context(), collectionIDs) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + rw.WriteHeader(http.StatusOK) +} + +func (s *p2pHandler) GetAllP2PCollections(rw http.ResponseWriter, req *http.Request) { + p2p, ok := req.Context().Value(dbContextKey).(client.P2P) + if !ok { + responseJSON(rw, http.StatusBadRequest, errorResponse{ErrP2PDisabled}) + return + } + + cols, err := p2p.GetAllP2PCollections(req.Context()) + if err != nil { + 
responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, cols) +} + +func (h *p2pHandler) bindRoutes(router *Router) { + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + peerInfoSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/peer_info", + } + replicatorSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/replicator", + } + + peerInfoResponse := openapi3.NewResponse(). + WithDescription("Peer network info"). + WithContent(openapi3.NewContentWithJSONSchemaRef(peerInfoSchema)) + + peerInfo := openapi3.NewOperation() + peerInfo.OperationID = "peer_info" + peerInfo.Tags = []string{"p2p"} + peerInfo.AddResponse(200, peerInfoResponse) + peerInfo.Responses["400"] = errorResponse + + getReplicatorsSchema := openapi3.NewArraySchema() + getReplicatorsSchema.Items = replicatorSchema + getReplicatorsResponse := openapi3.NewResponse(). + WithDescription("Replicators"). + WithContent(openapi3.NewContentWithJSONSchema(getReplicatorsSchema)) + + getReplicators := openapi3.NewOperation() + getReplicators.Description = "List peer replicators" + getReplicators.OperationID = "peer_replicator_list" + getReplicators.Tags = []string{"p2p"} + getReplicators.AddResponse(200, getReplicatorsResponse) + getReplicators.Responses["400"] = errorResponse + + replicatorRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchemaRef(replicatorSchema)) + + setReplicator := openapi3.NewOperation() + setReplicator.Description = "Add peer replicators" + setReplicator.OperationID = "peer_replicator_set" + setReplicator.Tags = []string{"p2p"} + setReplicator.RequestBody = &openapi3.RequestBodyRef{ + Value: replicatorRequest, + } + setReplicator.Responses = make(openapi3.Responses) + setReplicator.Responses["200"] = successResponse + setReplicator.Responses["400"] = errorResponse + + deleteReplicator := openapi3.NewOperation() + deleteReplicator.Description = "Delete peer replicators" + deleteReplicator.OperationID = "peer_replicator_delete" + deleteReplicator.Tags = []string{"p2p"} + deleteReplicator.RequestBody = &openapi3.RequestBodyRef{ + Value: replicatorRequest, + } + deleteReplicator.Responses = make(openapi3.Responses) + deleteReplicator.Responses["200"] = successResponse + deleteReplicator.Responses["400"] = errorResponse + + peerCollectionsSchema := openapi3.NewArraySchema(). + WithItems(openapi3.NewStringSchema()) + + peerCollectionRequest := openapi3.NewRequestBody(). + WithRequired(true). + WithContent(openapi3.NewContentWithJSONSchema(peerCollectionsSchema)) + + getPeerCollectionsResponse := openapi3.NewResponse(). + WithDescription("Peer collections"). 
+ WithContent(openapi3.NewContentWithJSONSchema(peerCollectionsSchema)) + + getPeerCollections := openapi3.NewOperation() + getPeerCollections.Description = "List peer collections" + getPeerCollections.OperationID = "peer_collection_list" + getPeerCollections.Tags = []string{"p2p"} + getPeerCollections.AddResponse(200, getPeerCollectionsResponse) + getPeerCollections.Responses["400"] = errorResponse + + addPeerCollections := openapi3.NewOperation() + addPeerCollections.Description = "Add peer collections" + addPeerCollections.OperationID = "peer_collection_add" + addPeerCollections.Tags = []string{"p2p"} + addPeerCollections.RequestBody = &openapi3.RequestBodyRef{ + Value: peerCollectionRequest, + } + addPeerCollections.Responses = make(openapi3.Responses) + addPeerCollections.Responses["200"] = successResponse + addPeerCollections.Responses["400"] = errorResponse + + removePeerCollections := openapi3.NewOperation() + removePeerCollections.Description = "Remove peer collections" + removePeerCollections.OperationID = "peer_collection_remove" + removePeerCollections.Tags = []string{"p2p"} + removePeerCollections.RequestBody = &openapi3.RequestBodyRef{ + Value: peerCollectionRequest, + } + removePeerCollections.Responses = make(openapi3.Responses) + removePeerCollections.Responses["200"] = successResponse + removePeerCollections.Responses["400"] = errorResponse + + router.AddRoute("/p2p/info", http.MethodGet, peerInfo, h.PeerInfo) + router.AddRoute("/p2p/replicators", http.MethodGet, getReplicators, h.GetAllReplicators) + router.AddRoute("/p2p/replicators", http.MethodPost, setReplicator, h.SetReplicator) + router.AddRoute("/p2p/replicators", http.MethodDelete, deleteReplicator, h.DeleteReplicator) + router.AddRoute("/p2p/collections", http.MethodGet, getPeerCollections, h.GetAllP2PCollections) + router.AddRoute("/p2p/collections", http.MethodPost, addPeerCollections, h.AddP2PCollection) + router.AddRoute("/p2p/collections", http.MethodDelete, removePeerCollections, h.RemoveP2PCollection) +} diff --git a/api/http/playground.go b/http/handler_playground.go similarity index 100% rename from api/http/playground.go rename to http/handler_playground.go diff --git a/http/handler_store.go b/http/handler_store.go index d0cbdf42d2..aadbb37731 100644 --- a/http/handler_store.go +++ b/http/handler_store.go @@ -17,38 +17,22 @@ import ( "io" "net/http" - "github.com/go-chi/chi/v5" + "github.com/getkin/kin-openapi/openapi3" "github.com/sourcenetwork/defradb/client" ) type storeHandler struct{} -func (s *storeHandler) SetReplicator(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - - var rep client.Replicator - if err := requestJSON(req, &rep); err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - err := store.SetReplicator(req.Context(), rep) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - rw.WriteHeader(http.StatusOK) -} - -func (s *storeHandler) DeleteReplicator(rw http.ResponseWriter, req *http.Request) { +func (s *storeHandler) BasicImport(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) - var rep client.Replicator - if err := requestJSON(req, &rep); err != nil { + var config client.BackupConfig + if err := requestJSON(req, &config); err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - err := store.DeleteReplicator(req.Context(), rep) + err := store.BasicImport(req.Context(), 
config.Filepath) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -56,21 +40,15 @@ func (s *storeHandler) DeleteReplicator(rw http.ResponseWriter, req *http.Reques rw.WriteHeader(http.StatusOK) } -func (s *storeHandler) GetAllReplicators(rw http.ResponseWriter, req *http.Request) { +func (s *storeHandler) BasicExport(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) - reps, err := store.GetAllReplicators(req.Context()) - if err != nil { + var config client.BackupConfig + if err := requestJSON(req, &config); err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, reps) -} - -func (s *storeHandler) AddP2PCollection(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - - err := store.AddP2PCollection(req.Context(), chi.URLParam(req, "id")) + err := store.BasicExport(req.Context(), &config) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -78,21 +56,15 @@ func (s *storeHandler) AddP2PCollection(rw http.ResponseWriter, req *http.Reques rw.WriteHeader(http.StatusOK) } -func (s *storeHandler) RemoveP2PCollection(rw http.ResponseWriter, req *http.Request) { +func (s *storeHandler) AddSchema(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) - err := store.RemoveP2PCollection(req.Context(), chi.URLParam(req, "id")) + schema, err := io.ReadAll(req.Body) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - rw.WriteHeader(http.StatusOK) -} - -func (s *storeHandler) GetAllP2PCollections(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - - cols, err := store.GetAllP2PCollections(req.Context()) + cols, err := store.AddSchema(req.Context(), string(schema)) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -100,31 +72,17 @@ func (s *storeHandler) GetAllP2PCollections(rw http.ResponseWriter, req *http.Re responseJSON(rw, http.StatusOK, cols) } -func (s *storeHandler) BasicImport(rw http.ResponseWriter, req *http.Request) { +func (s *storeHandler) PatchSchema(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) - var config client.BackupConfig - if err := requestJSON(req, &config); err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - err := store.BasicImport(req.Context(), config.Filepath) + var message patchSchemaRequest + err := requestJSON(req, &message) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - rw.WriteHeader(http.StatusOK) -} - -func (s *storeHandler) BasicExport(rw http.ResponseWriter, req *http.Request) { - store := req.Context().Value(storeContextKey).(client.Store) - var config client.BackupConfig - if err := requestJSON(req, &config); err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - err := store.BasicExport(req.Context(), &config) + err = store.PatchSchema(req.Context(), message.Patch, message.SetAsDefaultVersion) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -132,31 +90,15 @@ func (s *storeHandler) BasicExport(rw http.ResponseWriter, req *http.Request) { rw.WriteHeader(http.StatusOK) } -func (s *storeHandler) AddSchema(rw http.ResponseWriter, req *http.Request) { 
- store := req.Context().Value(storeContextKey).(client.Store) - - schema, err := io.ReadAll(req.Body) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - cols, err := store.AddSchema(req.Context(), string(schema)) - if err != nil { - responseJSON(rw, http.StatusBadRequest, errorResponse{err}) - return - } - responseJSON(rw, http.StatusOK, cols) -} - -func (s *storeHandler) PatchSchema(rw http.ResponseWriter, req *http.Request) { +func (s *storeHandler) SetDefaultSchemaVersion(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) - patch, err := io.ReadAll(req.Body) + schemaVersionID, err := io.ReadAll(req.Body) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - err = store.PatchSchema(req.Context(), string(patch)) + err = store.SetDefaultSchemaVersion(req.Context(), string(schemaVersionID)) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return @@ -174,35 +116,78 @@ func (s *storeHandler) GetCollection(rw http.ResponseWriter, req *http.Request) responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, col.Description()) - case req.URL.Query().Has("schema_id"): - col, err := store.GetCollectionBySchemaID(req.Context(), req.URL.Query().Get("schema_id")) + responseJSON(rw, http.StatusOK, col.Definition()) + case req.URL.Query().Has("schema_root"): + cols, err := store.GetCollectionsBySchemaRoot(req.Context(), req.URL.Query().Get("schema_root")) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, col.Description()) + colDesc := make([]client.CollectionDefinition, len(cols)) + for i, col := range cols { + colDesc[i] = col.Definition() + } + responseJSON(rw, http.StatusOK, colDesc) case req.URL.Query().Has("version_id"): - col, err := store.GetCollectionByVersionID(req.Context(), req.URL.Query().Get("version_id")) + cols, err := store.GetCollectionsByVersionID(req.Context(), req.URL.Query().Get("version_id")) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - responseJSON(rw, http.StatusOK, col.Description()) + colDesc := make([]client.CollectionDefinition, len(cols)) + for i, col := range cols { + colDesc[i] = col.Definition() + } + responseJSON(rw, http.StatusOK, colDesc) default: cols, err := store.GetAllCollections(req.Context()) if err != nil { responseJSON(rw, http.StatusBadRequest, errorResponse{err}) return } - colDesc := make([]client.CollectionDescription, len(cols)) + colDesc := make([]client.CollectionDefinition, len(cols)) for i, col := range cols { - colDesc[i] = col.Description() + colDesc[i] = col.Definition() } responseJSON(rw, http.StatusOK, colDesc) } } +func (s *storeHandler) GetSchema(rw http.ResponseWriter, req *http.Request) { + store := req.Context().Value(storeContextKey).(client.Store) + + switch { + case req.URL.Query().Has("version_id"): + schema, err := store.GetSchemaByVersionID(req.Context(), req.URL.Query().Get("version_id")) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, schema) + case req.URL.Query().Has("root"): + schema, err := store.GetSchemasByRoot(req.Context(), req.URL.Query().Get("root")) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, schema) + case req.URL.Query().Has("name"): + schema, err := 
store.GetSchemasByName(req.Context(), req.URL.Query().Get("name")) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, schema) + default: + schema, err := store.GetAllSchemas(req.Context()) + if err != nil { + responseJSON(rw, http.StatusBadRequest, errorResponse{err}) + return + } + responseJSON(rw, http.StatusOK, schema) + } +} + func (s *storeHandler) GetAllIndexes(rw http.ResponseWriter, req *http.Request) { store := req.Context().Value(storeContextKey).(client.Store) @@ -330,3 +315,217 @@ func (s *storeHandler) ExecRequest(rw http.ResponseWriter, req *http.Request) { } } } + +func (h *storeHandler) bindRoutes(router *Router) { + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + collectionSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/collection", + } + schemaSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/schema", + } + graphQLRequestSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/graphql_request", + } + graphQLResponseSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/graphql_response", + } + backupConfigSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/backup_config", + } + patchSchemaRequestSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/patch_schema_request", + } + + collectionArraySchema := openapi3.NewArraySchema() + collectionArraySchema.Items = collectionSchema + + addSchemaResponse := openapi3.NewResponse(). + WithDescription("Collection(s)"). + WithJSONSchema(collectionArraySchema) + + addSchemaRequest := openapi3.NewRequestBody(). + WithContent(openapi3.NewContentWithSchema(openapi3.NewStringSchema(), []string{"text/plain"})) + + addSchema := openapi3.NewOperation() + addSchema.OperationID = "add_schema" + addSchema.Description = "Add a new schema definition" + addSchema.Tags = []string{"schema"} + addSchema.RequestBody = &openapi3.RequestBodyRef{ + Value: addSchemaRequest, + } + addSchema.AddResponse(200, addSchemaResponse) + addSchema.Responses["400"] = errorResponse + + patchSchemaRequest := openapi3.NewRequestBody(). + WithJSONSchemaRef(patchSchemaRequestSchema) + + patchSchema := openapi3.NewOperation() + patchSchema.OperationID = "patch_schema" + patchSchema.Description = "Update a schema definition" + patchSchema.Tags = []string{"schema"} + patchSchema.RequestBody = &openapi3.RequestBodyRef{ + Value: patchSchemaRequest, + } + patchSchema.Responses = make(openapi3.Responses) + patchSchema.Responses["200"] = successResponse + patchSchema.Responses["400"] = errorResponse + + setDefaultSchemaVersionRequest := openapi3.NewRequestBody(). + WithContent(openapi3.NewContentWithSchema(openapi3.NewStringSchema(), []string{"text/plain"})) + + setDefaultSchemaVersion := openapi3.NewOperation() + setDefaultSchemaVersion.OperationID = "set_default_schema_version" + setDefaultSchemaVersion.Description = "Set the default schema version for a collection" + setDefaultSchemaVersion.Tags = []string{"schema"} + setDefaultSchemaVersion.RequestBody = &openapi3.RequestBodyRef{ + Value: setDefaultSchemaVersionRequest, + } + setDefaultSchemaVersion.Responses = make(openapi3.Responses) + setDefaultSchemaVersion.Responses["200"] = successResponse + setDefaultSchemaVersion.Responses["400"] = errorResponse + + backupRequest := openapi3.NewRequestBody(). + WithRequired(true). 
+ WithJSONSchemaRef(backupConfigSchema) + + backupExport := openapi3.NewOperation() + backupExport.OperationID = "backup_export" + backupExport.Description = "Export a database backup to file" + backupExport.Tags = []string{"backup"} + backupExport.Responses = make(openapi3.Responses) + backupExport.Responses["200"] = successResponse + backupExport.Responses["400"] = errorResponse + backupExport.RequestBody = &openapi3.RequestBodyRef{ + Value: backupRequest, + } + + backupImport := openapi3.NewOperation() + backupImport.OperationID = "backup_import" + backupImport.Description = "Import a database backup from file" + backupImport.Tags = []string{"backup"} + backupImport.Responses = make(openapi3.Responses) + backupImport.Responses["200"] = successResponse + backupImport.Responses["400"] = errorResponse + backupImport.RequestBody = &openapi3.RequestBodyRef{ + Value: backupRequest, + } + + collectionNameQueryParam := openapi3.NewQueryParameter("name"). + WithDescription("Collection name"). + WithSchema(openapi3.NewStringSchema()) + collectionSchemaRootQueryParam := openapi3.NewQueryParameter("schema_root"). + WithDescription("Collection schema root"). + WithSchema(openapi3.NewStringSchema()) + collectionVersionIdQueryParam := openapi3.NewQueryParameter("version_id"). + WithDescription("Collection schema version id"). + WithSchema(openapi3.NewStringSchema()) + + collectionsSchema := openapi3.NewArraySchema() + collectionsSchema.Items = collectionSchema + + collectionResponseSchema := openapi3.NewOneOfSchema() + collectionResponseSchema.OneOf = openapi3.SchemaRefs{ + collectionSchema, + openapi3.NewSchemaRef("", collectionsSchema), + } + + collectionsResponse := openapi3.NewResponse(). + WithDescription("Collection(s) with matching name, schema id, or version id."). + WithJSONSchema(collectionResponseSchema) + + collectionDescribe := openapi3.NewOperation() + collectionDescribe.OperationID = "collection_describe" + collectionDescribe.Description = "Introspect collection(s) by name, schema id, or version id." + collectionDescribe.Tags = []string{"collection"} + collectionDescribe.AddParameter(collectionNameQueryParam) + collectionDescribe.AddParameter(collectionSchemaRootQueryParam) + collectionDescribe.AddParameter(collectionVersionIdQueryParam) + collectionDescribe.AddResponse(200, collectionsResponse) + collectionDescribe.Responses["400"] = errorResponse + + schemaNameQueryParam := openapi3.NewQueryParameter("name"). + WithDescription("Schema name"). + WithSchema(openapi3.NewStringSchema()) + schemaSchemaRootQueryParam := openapi3.NewQueryParameter("root"). + WithDescription("Schema root"). + WithSchema(openapi3.NewStringSchema()) + schemaVersionIDQueryParam := openapi3.NewQueryParameter("version_id"). + WithDescription("Schema version id"). + WithSchema(openapi3.NewStringSchema()) + + schemasSchema := openapi3.NewArraySchema() + schemasSchema.Items = schemaSchema + + schemaResponseSchema := openapi3.NewOneOfSchema() + schemaResponseSchema.OneOf = openapi3.SchemaRefs{ + schemaSchema, + openapi3.NewSchemaRef("", schemasSchema), + } + + schemaResponse := openapi3.NewResponse(). + WithDescription("Schema(s) with matching name, schema id, or version id."). + WithJSONSchema(schemaResponseSchema) + + schemaDescribe := openapi3.NewOperation() + schemaDescribe.OperationID = "schema_describe" + schemaDescribe.Description = "Introspect schema(s) by name, schema root, or version id." 
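All of the bindRoutes bodies in this diff share one shape: build openapi3 operations whose shared pieces point at "#/components" refs, then register each handler together with its spec in a single AddRoute call. Condensed into a sketch for a hypothetical /ping endpoint:

```go
package http

import (
	"net/http"

	"github.com/getkin/kin-openapi/openapi3"
)

// bindPing shows the route-binding pattern used by every handler in
// this diff, applied to a hypothetical health check endpoint.
func bindPing(router *Router) {
	successResponse := &openapi3.ResponseRef{
		Ref: "#/components/responses/success",
	}

	ping := openapi3.NewOperation()
	ping.OperationID = "ping"
	ping.Description = "Health check"
	ping.Responses = make(openapi3.Responses)
	ping.Responses["200"] = successResponse

	router.AddRoute("/ping", http.MethodGet, ping, func(rw http.ResponseWriter, req *http.Request) {
		rw.WriteHeader(http.StatusOK)
	})
}
```

Because AddRoute records the operation on the shared spec, the endpoint shows up in /openapi.json with no extra bookkeeping.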
+ schemaDescribe.Tags = []string{"schema"} + schemaDescribe.AddParameter(schemaNameQueryParam) + schemaDescribe.AddParameter(schemaSchemaRootQueryParam) + schemaDescribe.AddParameter(schemaVersionIDQueryParam) + schemaDescribe.AddResponse(200, schemaResponse) + schemaDescribe.Responses["400"] = errorResponse + + graphQLRequest := openapi3.NewRequestBody(). + WithContent(openapi3.NewContentWithJSONSchemaRef(graphQLRequestSchema)) + + graphQLResponse := openapi3.NewResponse(). + WithDescription("GraphQL response"). + WithContent(openapi3.NewContentWithJSONSchemaRef(graphQLResponseSchema)) + + graphQLPost := openapi3.NewOperation() + graphQLPost.Description = "GraphQL POST endpoint" + graphQLPost.OperationID = "graphql_post" + graphQLPost.Tags = []string{"graphql"} + graphQLPost.RequestBody = &openapi3.RequestBodyRef{ + Value: graphQLRequest, + } + graphQLPost.AddResponse(200, graphQLResponse) + graphQLPost.Responses["400"] = errorResponse + + graphQLQueryParam := openapi3.NewQueryParameter("query"). + WithSchema(openapi3.NewStringSchema()) + + graphQLGet := openapi3.NewOperation() + graphQLGet.Description = "GraphQL GET endpoint" + graphQLGet.OperationID = "graphql_get" + graphQLGet.Tags = []string{"graphql"} + graphQLGet.AddParameter(graphQLQueryParam) + graphQLGet.AddResponse(200, graphQLResponse) + graphQLGet.Responses["400"] = errorResponse + + debugDump := openapi3.NewOperation() + debugDump.Description = "Dump database" + debugDump.OperationID = "debug_dump" + debugDump.Tags = []string{"debug"} + debugDump.Responses = make(openapi3.Responses) + debugDump.Responses["200"] = successResponse + debugDump.Responses["400"] = errorResponse + + router.AddRoute("/backup/export", http.MethodPost, backupExport, h.BasicExport) + router.AddRoute("/backup/import", http.MethodPost, backupImport, h.BasicImport) + router.AddRoute("/collections", http.MethodGet, collectionDescribe, h.GetCollection) + router.AddRoute("/graphql", http.MethodGet, graphQLGet, h.ExecRequest) + router.AddRoute("/graphql", http.MethodPost, graphQLPost, h.ExecRequest) + router.AddRoute("/debug/dump", http.MethodGet, debugDump, h.PrintDump) + router.AddRoute("/schema", http.MethodPost, addSchema, h.AddSchema) + router.AddRoute("/schema", http.MethodPatch, patchSchema, h.PatchSchema) + router.AddRoute("/schema", http.MethodGet, schemaDescribe, h.GetSchema) + router.AddRoute("/schema/default", http.MethodPost, setDefaultSchemaVersion, h.SetDefaultSchemaVersion) +} diff --git a/http/handler_tx.go b/http/handler_tx.go index b7f1c82545..6bdb6b2009 100644 --- a/http/handler_tx.go +++ b/http/handler_tx.go @@ -15,6 +15,7 @@ import ( "strconv" "sync" + "github.com/getkin/kin-openapi/openapi3" "github.com/go-chi/chi/v5" "github.com/sourcenetwork/defradb/client" @@ -93,3 +94,66 @@ func (h *txHandler) Discard(rw http.ResponseWriter, req *http.Request) { txVal.(datastore.Txn).Discard(req.Context()) rw.WriteHeader(http.StatusOK) } + +func (h *txHandler) bindRoutes(router *Router) { + errorResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/error", + } + successResponse := &openapi3.ResponseRef{ + Ref: "#/components/responses/success", + } + createTxSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/create_tx", + } + + txnReadOnlyQueryParam := openapi3.NewQueryParameter("read_only"). + WithDescription("Read only transaction"). + WithSchema(openapi3.NewBoolSchema().WithDefault(false)) + + txnCreateResponse := openapi3.NewResponse(). + WithDescription("Transaction info"). 
+		WithJSONSchemaRef(createTxSchema)
+
+	txnCreate := openapi3.NewOperation()
+	txnCreate.OperationID = "new_transaction"
+	txnCreate.Description = "Create a new transaction"
+	txnCreate.Tags = []string{"transaction"}
+	txnCreate.AddParameter(txnReadOnlyQueryParam)
+	txnCreate.AddResponse(200, txnCreateResponse)
+	txnCreate.Responses["400"] = errorResponse
+
+	txnConcurrent := openapi3.NewOperation()
+	txnConcurrent.OperationID = "new_concurrent_transaction"
+	txnConcurrent.Description = "Create a new concurrent transaction"
+	txnConcurrent.Tags = []string{"transaction"}
+	txnConcurrent.AddParameter(txnReadOnlyQueryParam)
+	txnConcurrent.AddResponse(200, txnCreateResponse)
+	txnConcurrent.Responses["400"] = errorResponse
+
+	txnIdPathParam := openapi3.NewPathParameter("id").
+		WithRequired(true).
+		WithSchema(openapi3.NewInt64Schema())
+
+	txnCommit := openapi3.NewOperation()
+	txnCommit.OperationID = "transaction_commit"
+	txnCommit.Description = "Commit a transaction"
+	txnCommit.Tags = []string{"transaction"}
+	txnCommit.AddParameter(txnIdPathParam)
+	txnCommit.Responses = make(openapi3.Responses)
+	txnCommit.Responses["200"] = successResponse
+	txnCommit.Responses["400"] = errorResponse
+
+	txnDiscard := openapi3.NewOperation()
+	txnDiscard.OperationID = "transaction_discard"
+	txnDiscard.Description = "Discard a transaction"
+	txnDiscard.Tags = []string{"transaction"}
+	txnDiscard.AddParameter(txnIdPathParam)
+	txnDiscard.Responses = make(openapi3.Responses)
+	txnDiscard.Responses["200"] = successResponse
+	txnDiscard.Responses["400"] = errorResponse
+
+	router.AddRoute("/tx", http.MethodPost, txnCreate, h.NewTxn)
+	router.AddRoute("/tx/concurrent", http.MethodPost, txnConcurrent, h.NewConcurrentTxn)
+	router.AddRoute("/tx/{id}", http.MethodPost, txnCommit, h.Commit)
+	router.AddRoute("/tx/{id}", http.MethodDelete, txnDiscard, h.Discard)
+}
diff --git a/http/http_client.go b/http/http_client.go
index 48323607ab..13abb3c6d0 100644
--- a/http/http_client.go
+++ b/http/http_client.go
@@ -16,6 +16,7 @@ import (
 	"io"
 	"net/http"
 	"net/url"
+	"strings"
 )

 type httpClient struct {
@@ -24,12 +25,19 @@ type httpClient struct {
 	txValue string
 }

-func newHttpClient(baseURL *url.URL) *httpClient {
+func newHttpClient(rawURL string) (*httpClient, error) {
+	if !strings.HasPrefix(rawURL, "http") {
+		rawURL = "http://" + rawURL
+	}
+	baseURL, err := url.Parse(rawURL)
+	if err != nil {
+		return nil, err
+	}
 	client := httpClient{
 		client:  http.DefaultClient,
-		baseURL: baseURL,
+		baseURL: baseURL.JoinPath("/api/v0"),
 	}
-	return &client
+	return &client, nil
 }

 func (c *httpClient) withTxn(value uint64) *httpClient {
diff --git a/http/middleware.go b/http/middleware.go
index 28f1e0ff1e..d33cbfb5ff 100644
--- a/http/middleware.go
+++ b/http/middleware.go
@@ -14,9 +14,12 @@ import (
 	"context"
 	"net/http"
 	"strconv"
+	"strings"
 	"sync"

 	"github.com/go-chi/chi/v5"
+	"github.com/go-chi/cors"
+	"golang.org/x/exp/slices"

 	"github.com/sourcenetwork/defradb/client"
 	"github.com/sourcenetwork/defradb/datastore"
@@ -52,10 +55,29 @@ var (
 	colContextKey = contextKey("col")
 )

+// CorsMiddleware handles cross-origin requests.
+func CorsMiddleware(opts ServerOptions) func(http.Handler) http.Handler {
+	return cors.Handler(cors.Options{
+		AllowOriginFunc: func(r *http.Request, origin string) bool {
+			if slices.Contains(opts.AllowedOrigins, "*") {
+				return true
+			}
+			return slices.Contains(opts.AllowedOrigins, strings.ToLower(origin))
+		},
+		AllowedMethods: []string{"GET", "HEAD", "POST", "PATCH", "DELETE"},
+		AllowedHeaders:
[]string{"Content-Type"}, + MaxAge: 300, + }) +} + // ApiMiddleware sets the required context values for all API requests. -func ApiMiddleware(db client.DB, txs *sync.Map) func(http.Handler) http.Handler { +func ApiMiddleware(db client.DB, txs *sync.Map, opts ServerOptions) func(http.Handler) http.Handler { return func(next http.Handler) http.Handler { return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + if opts.TLS.HasValue() { + rw.Header().Add("Strict-Transport-Security", "max-age=63072000; includeSubDomains") + } + ctx := req.Context() ctx = context.WithValue(ctx, dbContextKey, db) ctx = context.WithValue(ctx, txsContextKey, txs) diff --git a/http/openapi.go b/http/openapi.go new file mode 100644 index 0000000000..4aa217e939 --- /dev/null +++ b/http/openapi.go @@ -0,0 +1,150 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "github.com/getkin/kin-openapi/openapi3" + "github.com/getkin/kin-openapi/openapi3gen" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/sourcenetwork/defradb/client" +) + +// openApiSchemas is a mapping of types to auto generate schemas for. +var openApiSchemas = map[string]any{ + "error": &errorResponse{}, + "create_tx": &CreateTxResponse{}, + "collection_update": &CollectionUpdateRequest{}, + "collection_delete": &CollectionDeleteRequest{}, + "peer_info": &peer.AddrInfo{}, + "graphql_request": &GraphQLRequest{}, + "graphql_response": &GraphQLResponse{}, + "backup_config": &client.BackupConfig{}, + "collection": &client.CollectionDescription{}, + "schema": &client.SchemaDescription{}, + "index": &client.IndexDescription{}, + "delete_result": &client.DeleteResult{}, + "update_result": &client.UpdateResult{}, + "lens_config": &client.LensConfig{}, + "replicator": &client.Replicator{}, + "ccip_request": &CCIPRequest{}, + "ccip_response": &CCIPResponse{}, + "patch_schema_request": &patchSchemaRequest{}, +} + +func NewOpenAPISpec() (*openapi3.T, error) { + schemas := make(openapi3.Schemas) + responses := make(openapi3.Responses) + parameters := make(openapi3.ParametersMap) + + generator := openapi3gen.NewGenerator(openapi3gen.UseAllExportedFields()) + for key, val := range openApiSchemas { + ref, err := generator.NewSchemaRefForValue(val, schemas) + if err != nil { + return nil, err + } + schemas[key] = ref + } + + errorSchema := &openapi3.SchemaRef{ + Ref: "#/components/schemas/error", + } + + errorResponse := openapi3.NewResponse(). + WithDescription("error"). + WithContent(openapi3.NewContentWithJSONSchemaRef(errorSchema)) + + successResponse := openapi3.NewResponse(). + WithDescription("ok") + + txnHeaderParam := openapi3.NewHeaderParameter("x-defradb-tx"). + WithDescription("Transaction id"). 
+ WithSchema(openapi3.NewInt64Schema()) + + // add common schemas, responses, and params so we can reference them + schemas["document"] = &openapi3.SchemaRef{ + Value: openapi3.NewObjectSchema().WithAnyAdditionalProperties(), + } + responses["success"] = &openapi3.ResponseRef{ + Value: successResponse, + } + responses["error"] = &openapi3.ResponseRef{ + Value: errorResponse, + } + parameters["txn"] = &openapi3.ParameterRef{ + Value: txnHeaderParam, + } + + return &openapi3.T{ + OpenAPI: "3.0.3", + Info: &openapi3.Info{ + Title: "DefraDB API", + Version: "0", + }, + Paths: make(openapi3.Paths), + Servers: openapi3.Servers{ + &openapi3.Server{ + Description: "Local DefraDB instance", + URL: "http://localhost:9181/api/v0", + }, + }, + ExternalDocs: &openapi3.ExternalDocs{ + Description: "Learn more about DefraDB", + URL: "https://docs.source.network", + }, + Components: &openapi3.Components{ + Schemas: schemas, + Responses: responses, + Parameters: parameters, + }, + Tags: openapi3.Tags{ + &openapi3.Tag{ + Name: "schema", + Description: "Add or update schema definitions", + }, + &openapi3.Tag{ + Name: "collection", + Description: "Add, remove, or update documents", + }, + &openapi3.Tag{ + Name: "index", + Description: "Add, update, or remove indexes", + }, + &openapi3.Tag{ + Name: "lens", + Description: "Migrate documents to and from schema versions", + }, + &openapi3.Tag{ + Name: "p2p", + Description: "Peer-to-peer network operations", + }, + &openapi3.Tag{ + Name: "transaction", + Description: "Database transaction operations", + }, + &openapi3.Tag{ + Name: "backup", + Description: "Database backup operations", + }, + &openapi3.Tag{ + Name: "graphql", + Description: "GraphQL query endpoints", + }, + &openapi3.Tag{ + Name: "ccip", + ExternalDocs: &openapi3.ExternalDocs{ + Description: "EIP-3668", + URL: "https://eips.ethereum.org/EIPS/eip-3668", + }, + }, + }, + }, nil +} diff --git a/http/router.go b/http/router.go new file mode 100644 index 0000000000..ce8d4fc62f --- /dev/null +++ b/http/router.go @@ -0,0 +1,68 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package http + +import ( + "context" + "net/http" + + "github.com/getkin/kin-openapi/openapi3" + "github.com/go-chi/chi/v5" +) + +type Router struct { + mux chi.Router + oas *openapi3.T +} + +func NewRouter() (*Router, error) { + oas, err := NewOpenAPISpec() + if err != nil { + return nil, err + } + return &Router{chi.NewMux(), oas}, nil +} + +// AddMiddleware adds middleware functions to the current route group. +func (r *Router) AddMiddleware(middlewares ...func(http.Handler) http.Handler) { + r.mux.Use(middlewares...) +} + +// AddRouteGroup adds handlers as a group. +func (r *Router) AddRouteGroup(group func(*Router)) { + r.mux.Group(func(router chi.Router) { + group(&Router{router, r.oas}) + }) +} + +// AddRoute adds a handler for the given route. +func (r *Router) AddRoute(pattern, method string, op *openapi3.Operation, handler http.HandlerFunc) { + r.mux.MethodFunc(method, pattern, handler) + r.oas.AddOperation(pattern, method, op) +} + +// Validate returns an error if the OpenAPI specification is invalid. 
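+// It resolves all schema references in the spec before validating, so it should be called after all routes have been registered.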
+func (r *Router) Validate(ctx context.Context) error { + loader := openapi3.NewLoader() + if err := loader.ResolveRefsIn(r.oas, nil); err != nil { + return err + } + return r.oas.Validate(ctx) +} + +// OpenAPI returns the OpenAPI specification. +func (r *Router) OpenAPI() *openapi3.T { + return r.oas +} + +func (r *Router) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + r.mux.ServeHTTP(rw, req) +} diff --git a/http/server.go b/http/server.go index afee4b9217..384264a8a6 100644 --- a/http/server.go +++ b/http/server.go @@ -11,101 +11,306 @@ package http import ( + "context" + "crypto/tls" + "fmt" + "net" "net/http" - "sync" + "path" + "strings" - "github.com/go-chi/chi/v5" - "github.com/go-chi/chi/v5/middleware" + "github.com/sourcenetwork/immutable" + "golang.org/x/crypto/acme/autocert" "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/logging" ) +const ( + // These constants are best-effort durations that fit our current API + // and help prevent running out of file descriptors. + // readTimeout = 5 * time.Second + // writeTimeout = 10 * time.Second + // idleTimeout = 120 * time.Second + + // Temporarily disabling timeouts until [this proposal](https://github.com/golang/go/issues/54136) is merged. + // https://github.com/sourcenetwork/defradb/issues/927 + readTimeout = 0 + writeTimeout = 0 + idleTimeout = 0 +) + +const ( + httpPort = ":80" + httpsPort = ":443" +) + +// Server struct holds the Handler for the HTTP API. type Server struct { - db client.DB - router *chi.Mux - txs *sync.Map -} - -func NewServer(db client.DB) *Server { - txs := &sync.Map{} - - tx_handler := &txHandler{} - store_handler := &storeHandler{} - collection_handler := &collectionHandler{} - lens_handler := &lensHandler{} - - router := chi.NewRouter() - router.Use(middleware.RequestLogger(&logFormatter{})) - router.Use(middleware.Recoverer) - - router.Route("/api/v0", func(api chi.Router) { - api.Use(ApiMiddleware(db, txs), TransactionMiddleware, StoreMiddleware) - api.Route("/tx", func(tx chi.Router) { - tx.Post("/", tx_handler.NewTxn) - tx.Post("/concurrent", tx_handler.NewConcurrentTxn) - tx.Post("/{id}", tx_handler.Commit) - tx.Delete("/{id}", tx_handler.Discard) - }) - api.Route("/backup", func(backup chi.Router) { - backup.Post("/export", store_handler.BasicExport) - backup.Post("/import", store_handler.BasicImport) - }) - api.Route("/schema", func(schema chi.Router) { - schema.Post("/", store_handler.AddSchema) - schema.Patch("/", store_handler.PatchSchema) - }) - api.Route("/collections", func(collections chi.Router) { - collections.Get("/", store_handler.GetCollection) - // with collection middleware - collections_tx := collections.With(CollectionMiddleware) - collections_tx.Get("/{name}", collection_handler.GetAllDocKeys) - collections_tx.Post("/{name}", collection_handler.Create) - collections_tx.Patch("/{name}", collection_handler.UpdateWith) - collections_tx.Delete("/{name}", collection_handler.DeleteWith) - collections_tx.Post("/{name}/indexes", collection_handler.CreateIndex) - collections_tx.Get("/{name}/indexes", collection_handler.GetIndexes) - collections_tx.Delete("/{name}/indexes/{index}", collection_handler.DropIndex) - collections_tx.Get("/{name}/{key}", collection_handler.Get) - collections_tx.Patch("/{name}/{key}", collection_handler.Update) - collections_tx.Delete("/{name}/{key}", collection_handler.Delete) - }) - api.Route("/lens", func(lens chi.Router) { 
lens.Use(LensMiddleware) - lens.Get("/", lens_handler.Config) - lens.Post("/", lens_handler.SetMigration) - lens.Post("/reload", lens_handler.ReloadLenses) - lens.Get("/{version}", lens_handler.HasMigration) - lens.Post("/{version}/up", lens_handler.MigrateUp) - lens.Post("/{version}/down", lens_handler.MigrateDown) - }) - api.Route("/graphql", func(graphQL chi.Router) { - graphQL.Get("/", store_handler.ExecRequest) - graphQL.Post("/", store_handler.ExecRequest) - }) - api.Route("/p2p", func(p2p chi.Router) { - p2p.Route("/replicators", func(p2p_replicators chi.Router) { - p2p_replicators.Get("/", store_handler.GetAllReplicators) - p2p_replicators.Post("/", store_handler.SetReplicator) - p2p_replicators.Delete("/", store_handler.DeleteReplicator) - }) - p2p.Route("/collections", func(p2p_collections chi.Router) { - p2p_collections.Get("/", store_handler.GetAllP2PCollections) - p2p_collections.Post("/{id}", store_handler.AddP2PCollection) - p2p_collections.Delete("/{id}", store_handler.RemoveP2PCollection) - }) - }) - api.Route("/debug", func(debug chi.Router) { - debug.Get("/dump", store_handler.PrintDump) - }) - }) - - return &Server{ - db: db, - router: router, - txs: txs, - } -} - -func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) { - s.router.ServeHTTP(w, req) + options ServerOptions + listener net.Listener + certManager *autocert.Manager + // address that is assigned to the server on listen + address string + + http.Server +} + +type ServerOptions struct { + // AllowedOrigins is the list of allowed origins for CORS. + AllowedOrigins []string + // TLS enables https when the value is present. + TLS immutable.Option[TLSOptions] + // RootDir is the directory for the node config. + RootDir string + // Domain is the domain for the API (optional). + Domain immutable.Option[string] +} + +type TLSOptions struct { + // PublicKey is the public key for TLS. Ignored if domain is set. + PublicKey string + // PrivateKey is the private key for TLS. Ignored if domain is set. + PrivateKey string + // Email is the address for the CA to send problem notifications (optional). + Email string + // Port is the TLS port. + Port string +} + +// NewServer instantiates a new server with the given DB and options. +func NewServer(db client.DB, options ...func(*Server)) (*Server, error) { + srv := &Server{ + Server: http.Server{ + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + IdleTimeout: idleTimeout, + }, + } + + for _, opt := range append(options, DefaultOpts()) { + opt(srv) + } + + handler, err := NewHandler(db, srv.options) + if err != nil { + return nil, err + } + srv.Handler = handler + return srv, nil +} + +func newHTTPRedirServer(m *autocert.Manager) *Server { + srv := &Server{ + Server: http.Server{ + ReadTimeout: readTimeout, + WriteTimeout: writeTimeout, + IdleTimeout: idleTimeout, + }, + } + + srv.Addr = httpPort + srv.Handler = m.HTTPHandler(nil) + + return srv +} + +// DefaultOpts returns the default options for the server. +func DefaultOpts() func(*Server) { + return func(s *Server) { + if s.Addr == "" { + s.Addr = "localhost:9181" + } + } +} + +// WithAllowedOrigins returns an option to set the allowed origins for CORS. +func WithAllowedOrigins(origins ...string) func(*Server) { + return func(s *Server) { + s.options.AllowedOrigins = append(s.options.AllowedOrigins, origins...) + } +} + +// WithAddress returns an option to set the address for the server. 
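+// Addresses that are not localhost and not a valid IP address are assumed to be domain names to be used with TLS.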
+func WithAddress(addr string) func(*Server) { + return func(s *Server) { + s.Addr = addr + + // If the address is not localhost, we check to see if it's a valid IP address. + // If it's not a valid IP, we assume that it's a domain name to be used with TLS. + if !strings.HasPrefix(addr, "localhost:") && !strings.HasPrefix(addr, ":") { + host, _, err := net.SplitHostPort(addr) + if err != nil { + host = addr + } + ip := net.ParseIP(host) + if ip == nil { + s.Addr = httpPort + s.options.Domain = immutable.Some(host) + } + } + } +} + +// WithCAEmail returns an option to set the email address for the CA to send problem notifications. +func WithCAEmail(email string) func(*Server) { + return func(s *Server) { + tlsOpt := s.options.TLS.Value() + tlsOpt.Email = email + s.options.TLS = immutable.Some(tlsOpt) + } +} + +// WithRootDir returns an option to set the root directory for the node config. +func WithRootDir(rootDir string) func(*Server) { + return func(s *Server) { + s.options.RootDir = rootDir + } +} + +// WithSelfSignedCert returns an option to set the public and private keys for TLS. +func WithSelfSignedCert(pubKey, privKey string) func(*Server) { + return func(s *Server) { + tlsOpt := s.options.TLS.Value() + tlsOpt.PublicKey = pubKey + tlsOpt.PrivateKey = privKey + s.options.TLS = immutable.Some(tlsOpt) + } +} + +// WithTLS returns an option to enable TLS. +func WithTLS() func(*Server) { + return func(s *Server) { + tlsOpt := s.options.TLS.Value() + tlsOpt.Port = httpsPort + s.options.TLS = immutable.Some(tlsOpt) + } +} + +// WithTLSPort returns an option to set the port for TLS. +func WithTLSPort(port int) func(*Server) { + return func(s *Server) { + tlsOpt := s.options.TLS.Value() + tlsOpt.Port = fmt.Sprintf(":%d", port) + s.options.TLS = immutable.Some(tlsOpt) + } +} + +// Listen creates a new net.Listener and saves it on the receiver. +func (s *Server) Listen(ctx context.Context) error { + var err error + if s.options.TLS.HasValue() { + return s.listenWithTLS(ctx) + } + + lc := net.ListenConfig{} + s.listener, err = lc.Listen(ctx, "tcp", s.Addr) + if err != nil { + return errors.WithStack(err) + } + + // Save the address on the server in case the port was set to random, + // so that we can see what was assigned. 
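+ // For example, an address ending in ":0" results in the OS assigning an available port.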
+ s.address = s.listener.Addr().String() + + return nil +} + +func (s *Server) listenWithTLS(ctx context.Context) error { + cfg := &tls.Config{ + MinVersion: tls.VersionTLS12, + // We only allow cipher suites that are marked secure + // by SSL Labs. + CipherSuites: []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + }, + ServerName: "DefraDB", + } + + if s.options.Domain.HasValue() && s.options.Domain.Value() != "" { + s.Addr = s.options.TLS.Value().Port + + if s.options.TLS.Value().Email == "" || s.options.TLS.Value().Email == config.DefaultAPIEmail { + return ErrNoEmail + } + + certCache := path.Join(s.options.RootDir, "autocerts") + + log.FeedbackInfo( + ctx, + "Generating auto certificate", + logging.NewKV("Domain", s.options.Domain.Value()), + logging.NewKV("Certificate cache", certCache), + ) + + m := &autocert.Manager{ + Cache: autocert.DirCache(certCache), + Prompt: autocert.AcceptTOS, + Email: s.options.TLS.Value().Email, + HostPolicy: autocert.HostWhitelist(s.options.Domain.Value()), + } + + cfg.GetCertificate = m.GetCertificate + + // We set the manager on the server instance to later start + // a redirection server. + s.certManager = m + } else { + // When not using auto cert, we create a self-signed certificate + // with the provided public and private keys. + log.FeedbackInfo(ctx, "Generating self signed certificate") + + cert, err := tls.LoadX509KeyPair( + s.options.TLS.Value().PrivateKey, + s.options.TLS.Value().PublicKey, + ) + if err != nil { + return errors.WithStack(err) + } + + cfg.Certificates = []tls.Certificate{cert} + } + + var err error + s.listener, err = tls.Listen("tcp", s.Addr, cfg) + if err != nil { + return errors.WithStack(err) + } + + // Save the address on the server in case the port was set to random, + // so that we can see what was assigned. + s.address = s.listener.Addr().String() + + return nil +} + +// Run calls Serve with the receiver's listener. +func (s *Server) Run(ctx context.Context) error { + if s.listener == nil { + return ErrNoListener + } + + if s.certManager != nil { + // When using TLS, it's important to redirect HTTP requests to HTTPS. + go func() { + srv := newHTTPRedirServer(s.certManager) + err := srv.ListenAndServe() + if err != nil && !errors.Is(err, http.ErrServerClosed) { + log.Info(ctx, "Something went wrong with the redirection server", logging.NewKV("Error", err)) + } + }() + } + return s.Serve(s.listener) +} + +// AssignedAddr returns the address that was assigned to the server on calls to Listen. +func (s *Server) AssignedAddr() string { + return s.address } diff --git a/api/http/server_test.go b/http/server_test.go similarity index 71% rename from api/http/server_test.go rename to http/server_test.go index c19e60a2ac..5e970ad317 100644 --- a/api/http/server_test.go +++ b/http/server_test.go @@ -1,4 +1,4 @@ -// Copyright 2022 Democratized Data Foundation +// Copyright 2023 Democratized Data Foundation // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. 
@@ -17,12 +17,14 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/crypto/acme/autocert" ) func TestNewServerAndRunWithoutListener(t *testing.T) { ctx := context.Background() - s := NewServer(nil, WithAddress(":0")) + s, err := NewServer(nil, WithAddress(":0")) + require.NoError(t, err) if ok := assert.NotNil(t, s); ok { assert.Equal(t, ErrNoListener, s.Run(ctx)) } @@ -30,7 +32,8 @@ func TestNewServerAndRunWithoutListener(t *testing.T) { func TestNewServerAndRunWithListenerAndInvalidPort(t *testing.T) { ctx := context.Background() - s := NewServer(nil, WithAddress(":303000")) + s, err := NewServer(nil, WithAddress(":303000")) + require.NoError(t, err) if ok := assert.NotNil(t, s); ok { assert.Error(t, s.Listen(ctx)) } @@ -40,7 +43,8 @@ func TestNewServerAndRunWithListenerAndValidPort(t *testing.T) { ctx := context.Background() serverRunning := make(chan struct{}) serverDone := make(chan struct{}) - s := NewServer(nil, WithAddress(":0")) + s, err := NewServer(nil, WithAddress(":0")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -60,9 +64,9 @@ func TestNewServerAndRunWithListenerAndValidPort(t *testing.T) { func TestNewServerAndRunWithAutocertWithoutEmail(t *testing.T) { ctx := context.Background() dir := t.TempDir() - s := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0)) - - err := s.Listen(ctx) + s, err := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0)) + require.NoError(t, err) + err = s.Listen(ctx) assert.ErrorIs(t, err, ErrNoEmail) s.Shutdown(context.Background()) @@ -73,7 +77,8 @@ func TestNewServerAndRunWithAutocert(t *testing.T) { serverRunning := make(chan struct{}) serverDone := make(chan struct{}) dir := t.TempDir() - s := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0), WithCAEmail("dev@defradb.net")) + s, err := NewServer(nil, WithAddress("example.com"), WithRootDir(dir), WithTLSPort(0), WithCAEmail("dev@defradb.net")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -95,7 +100,8 @@ func TestNewServerAndRunWithSelfSignedCertAndNoKeyFiles(t *testing.T) { serverRunning := make(chan struct{}) serverDone := make(chan struct{}) dir := t.TempDir() - s := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + s, err := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -149,7 +155,8 @@ func TestNewServerAndRunWithSelfSignedCertAndInvalidPort(t *testing.T) { if err != nil { t.Fatal(err) } - s := NewServer(nil, WithAddress(":303000"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + s, err := NewServer(nil, WithAddress(":303000"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -177,7 +184,8 @@ func TestNewServerAndRunWithSelfSignedCert(t *testing.T) { if err != nil { t.Fatal(err) } - s := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + s, err := NewServer(nil, WithAddress("localhost:0"), WithSelfSignedCert(dir+"/server.crt", dir+"/server.key")) + require.NoError(t, err) go func() { close(serverRunning) err := s.Listen(ctx) @@ -195,53 +203,56 @@ func TestNewServerAndRunWithSelfSignedCert(t *testing.T) { } func TestNewServerWithoutOptions(t 
*testing.T) { - s := NewServer(nil) + s, err := NewServer(nil) + require.NoError(t, err) assert.Equal(t, "localhost:9181", s.Addr) - assert.Equal(t, []string(nil), s.options.allowedOrigins) + assert.Equal(t, []string(nil), s.options.AllowedOrigins) } func TestNewServerWithAddress(t *testing.T) { - s := NewServer(nil, WithAddress("localhost:9999")) + s, err := NewServer(nil, WithAddress("localhost:9999")) + require.NoError(t, err) assert.Equal(t, "localhost:9999", s.Addr) } func TestNewServerWithDomainAddress(t *testing.T) { - s := NewServer(nil, WithAddress("example.com")) - assert.Equal(t, "example.com", s.options.domain.Value()) - assert.NotNil(t, s.options.tls) + s, err := NewServer(nil, WithAddress("example.com")) + require.NoError(t, err) + assert.Equal(t, "example.com", s.options.Domain.Value()) + assert.NotNil(t, s.options.TLS) } func TestNewServerWithAllowedOrigins(t *testing.T) { - s := NewServer(nil, WithAllowedOrigins("https://source.network", "https://app.source.network")) - assert.Equal(t, []string{"https://source.network", "https://app.source.network"}, s.options.allowedOrigins) + s, err := NewServer(nil, WithAllowedOrigins("https://source.network", "https://app.source.network")) + require.NoError(t, err) + assert.Equal(t, []string{"https://source.network", "https://app.source.network"}, s.options.AllowedOrigins) } func TestNewServerWithCAEmail(t *testing.T) { - s := NewServer(nil, WithCAEmail("me@example.com")) - assert.Equal(t, "me@example.com", s.options.tls.Value().email) -} - -func TestNewServerWithPeerID(t *testing.T) { - s := NewServer(nil, WithPeerID("12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR")) - assert.Equal(t, "12D3KooWFpi6VTYKLtxUftJKEyfX8jDfKi8n15eaygH8ggfYFZbR", s.options.peerID) + s, err := NewServer(nil, WithCAEmail("me@example.com")) + require.NoError(t, err) + assert.Equal(t, "me@example.com", s.options.TLS.Value().Email) } func TestNewServerWithRootDir(t *testing.T) { dir := t.TempDir() - s := NewServer(nil, WithRootDir(dir)) - assert.Equal(t, dir, s.options.rootDir) + s, err := NewServer(nil, WithRootDir(dir)) + require.NoError(t, err) + assert.Equal(t, dir, s.options.RootDir) } func TestNewServerWithTLSPort(t *testing.T) { - s := NewServer(nil, WithTLSPort(44343)) - assert.Equal(t, ":44343", s.options.tls.Value().port) + s, err := NewServer(nil, WithTLSPort(44343)) + require.NoError(t, err) + assert.Equal(t, ":44343", s.options.TLS.Value().Port) } func TestNewServerWithSelfSignedCert(t *testing.T) { - s := NewServer(nil, WithSelfSignedCert("pub.key", "priv.key")) - assert.Equal(t, "pub.key", s.options.tls.Value().pubKey) - assert.Equal(t, "priv.key", s.options.tls.Value().privKey) - assert.NotNil(t, s.options.tls) + s, err := NewServer(nil, WithSelfSignedCert("pub.key", "priv.key")) + require.NoError(t, err) + assert.Equal(t, "pub.key", s.options.TLS.Value().PublicKey) + assert.Equal(t, "priv.key", s.options.TLS.Value().PrivateKey) + assert.NotNil(t, s.options.TLS) } func TestNewHTTPRedirServer(t *testing.T) { diff --git a/http/utils.go b/http/utils.go index a171e0ed38..c7b1507c4e 100644 --- a/http/utils.go +++ b/http/utils.go @@ -34,25 +34,6 @@ func responseJSON(rw http.ResponseWriter, status int, out any) { json.NewEncoder(rw).Encode(out) //nolint:errcheck } -func documentJSON(doc *client.Document) ([]byte, error) { - docMap, err := doc.ToMap() - if err != nil { - return nil, err - } - delete(docMap, "_key") - - for field, value := range doc.Values() { - if !value.IsDirty() { - delete(docMap, field.Name()) - } - if value.IsDelete() { - 
docMap[field.Name()] = nil - } - } - - return json.Marshal(docMap) -} - func parseError(msg any) error { switch msg { case client.ErrDocumentNotFound.Error(): diff --git a/lens/fetcher.go b/lens/fetcher.go index ee01aa7983..9186adbb7c 100644 --- a/lens/fetcher.go +++ b/lens/fetcher.go @@ -34,7 +34,7 @@ type lensedFetcher struct { txn datastore.Txn - col *client.CollectionDescription + col client.Collection // Cache the fieldDescriptions mapped by name to allow for cheaper access within the fetcher loop fieldDescriptionsByName map[string]client.FieldDescription @@ -58,7 +58,7 @@ func NewFetcher(source fetcher.Fetcher, registry client.LensRegistry) fetcher.Fe func (f *lensedFetcher) Init( ctx context.Context, txn datastore.Txn, - col *client.CollectionDescription, + col client.Collection, fields []client.FieldDescription, filter *mapper.Filter, docmapper *core.DocumentMapping, @@ -67,12 +67,12 @@ func (f *lensedFetcher) Init( ) error { f.col = col - f.fieldDescriptionsByName = make(map[string]client.FieldDescription, len(col.Schema.Fields)) + f.fieldDescriptionsByName = make(map[string]client.FieldDescription, len(col.Schema().Fields)) // Add cache the field descriptions in reverse, allowing smaller-index fields to overwrite any later // ones. This should never really happen here, but it ensures the result is consistent with col.GetField // which returns the first one it finds with a matching name. - for i := len(col.Schema.Fields) - 1; i >= 0; i-- { - field := col.Schema.Fields[i] + for i := len(col.Schema().Fields) - 1; i >= 0; i-- { + field := col.Schema().Fields[i] f.fieldDescriptionsByName[field.Name] = field } @@ -81,11 +81,11 @@ func (f *lensedFetcher) Init( return err } - history, err := getTargetedSchemaHistory(ctx, txn, cfg, f.col.Schema.SchemaID, f.col.Schema.VersionID) + history, err := getTargetedSchemaHistory(ctx, txn, cfg, f.col.Schema().Root, f.col.Schema().VersionID) if err != nil { return err } - f.lens = new(ctx, f.registry, f.col.Schema.VersionID, history) + f.lens = new(ctx, f.registry, f.col.Schema().VersionID, history) f.txn = txn for schemaVersionID := range history { @@ -100,7 +100,7 @@ func (f *lensedFetcher) Init( } } - f.targetVersionID = col.Schema.VersionID + f.targetVersionID = col.Schema().VersionID var innerFetcherFields []client.FieldDescription if f.hasMigrations { @@ -238,7 +238,7 @@ func (f *lensedFetcher) lensDocToEncodedDoc(docAsMap LensDoc) (fetcher.EncodedDo return &lensEncodedDocument{ key: []byte(key), - schemaVersionID: f.col.Schema.VersionID, + schemaVersionID: f.col.Schema().VersionID, status: status, properties: properties, }, nil @@ -283,7 +283,7 @@ func (f *lensedFetcher) updateDataStore(ctx context.Context, original map[string } datastoreKeyBase := core.DataStoreKey{ - CollectionID: f.col.IDString(), + CollectionID: f.col.Description().IDString(), DocKey: dockey, InstanceType: core.ValueKey, } diff --git a/lens/history.go b/lens/history.go index 0b2a914d94..56b43a9d5b 100644 --- a/lens/history.go +++ b/lens/history.go @@ -70,10 +70,10 @@ func getTargetedSchemaHistory( ctx context.Context, txn datastore.Txn, lensConfigs []client.LensConfig, - schemaID string, + schemaRoot string, targetSchemaVersionID string, ) (map[schemaVersionID]*targetedSchemaHistoryLink, error) { - history, err := getSchemaHistory(ctx, txn, lensConfigs, schemaID) + history, err := getSchemaHistory(ctx, txn, lensConfigs, schemaRoot) if err != nil { return nil, err } @@ -183,7 +183,7 @@ func getSchemaHistory( ctx context.Context, txn datastore.Txn, lensConfigs 
[]client.LensConfig, - schemaID string, + schemaRoot string, ) (map[schemaVersionID]*schemaHistoryLink, error) { pairings := map[string]*schemaHistoryPairing{} @@ -200,7 +200,7 @@ func getSchemaHistory( } } - prefix := core.NewSchemaHistoryKey(schemaID, "") + prefix := core.NewSchemaHistoryKey(schemaRoot, "") q, err := txn.Systemstore().Query(ctx, query.Query{ Prefix: prefix.ToString(), }) diff --git a/licenses/BSL.txt b/licenses/BSL.txt index 1847b1aee1..bd545e07fc 100644 --- a/licenses/BSL.txt +++ b/licenses/BSL.txt @@ -7,7 +7,7 @@ Parameters Licensor: Democratized Data (D2) Foundation -Licensed Work: DefraDB v0.7.0 +Licensed Work: DefraDB v0.8.0 The Licensed Work is (c) 2023 D2 Foundation. @@ -28,7 +28,7 @@ Additional Use Grant: You may only use the Licensed Work for the -Change Date: 2027-09-18 +Change Date: 2027-11-14 Change License: Apache License, Version 2.0 diff --git a/logging/registry.go b/logging/registry.go index 7cd7b808a2..9410498a72 100644 --- a/logging/registry.go +++ b/logging/registry.go @@ -44,6 +44,9 @@ func setConfig(newConfig Config) Config { } func updateLoggers(config Config) { + registryMutex.Lock() + defer registryMutex.Unlock() + for loggerName, loggers := range registry { newLoggerConfig := config.forLogger(loggerName) diff --git a/merkle/clock/clock.go b/merkle/clock/clock.go index 95151e2e76..2bdc9fda93 100644 --- a/merkle/clock/clock.go +++ b/merkle/clock/clock.go @@ -103,10 +103,8 @@ func (mc *MerkleClock) AddDAGNode( } // apply the new node and merge the delta with state - // @todo Remove NodeGetter as a parameter, and move it to a MerkleClock field - _, err = mc.ProcessNode( + err = mc.ProcessNode( ctx, - &CrdtNodeGetter{DeltaExtractor: mc.crdt.DeltaDecode}, delta, nd, ) @@ -117,17 +115,16 @@ func (mc *MerkleClock) AddDAGNode( // ProcessNode processes an already merged delta into a CRDT by adding it to the state. func (mc *MerkleClock) ProcessNode( ctx context.Context, - ng core.NodeGetter, delta core.Delta, node ipld.Node, -) ([]cid.Cid, error) { +) error { nodeCid := node.Cid() priority := delta.GetPriority() log.Debug(ctx, "Running ProcessNode", logging.NewKV("CID", nodeCid)) err := mc.crdt.Merge(ctx, delta) if err != nil { - return nil, NewErrMergingDelta(nodeCid, err) + return NewErrMergingDelta(nodeCid, err) } links := node.Links() @@ -145,18 +142,16 @@ func (mc *MerkleClock) ProcessNode( log.Debug(ctx, "No heads found") err := mc.headset.Write(ctx, nodeCid, priority) if err != nil { - return nil, NewErrAddingHead(nodeCid, err) + return NewErrAddingHead(nodeCid, err) } } - children := []cid.Cid{} - for _, l := range links { linkCid := l.Cid log.Debug(ctx, "Scanning for replacement heads", logging.NewKV("Child", linkCid)) isHead, err := mc.headset.IsHead(ctx, linkCid) if err != nil { - return nil, NewErrCheckingHead(linkCid, err) + return NewErrCheckingHead(linkCid, err) } if isHead { @@ -165,7 +160,7 @@ func (mc *MerkleClock) ProcessNode( // of current branch err = mc.headset.Replace(ctx, linkCid, nodeCid, priority) if err != nil { - return nil, NewErrReplacingHead(linkCid, nodeCid, err) + return NewErrReplacingHead(linkCid, nodeCid, err) } continue @@ -173,7 +168,7 @@ func (mc *MerkleClock) ProcessNode( known, err := mc.dagstore.Has(ctx, linkCid) if err != nil { - return nil, NewErrCouldNotFindBlock(linkCid, err) + return NewErrCouldNotFindBlock(linkCid, err) } if known { // we reached a non-head node in the known tree. 
@@ -192,11 +187,9 @@ func (mc *MerkleClock) ProcessNode( } continue } - - children = append(children, linkCid) } - return children, nil + return nil } // Heads returns the current heads of the MerkleClock. diff --git a/merkle/clock/clock_test.go b/merkle/clock/clock_test.go index bdf8dcaeec..a804165062 100644 --- a/merkle/clock/clock_test.go +++ b/merkle/clock/clock_test.go @@ -30,17 +30,15 @@ func newDS() ds.Datastore { func newTestMerkleClock() *MerkleClock { s := newDS() - rw := datastore.AsDSReaderWriter(s) - multistore := datastore.MultiStoreFrom(rw) - reg := crdt.NewLWWRegister(rw, core.CollectionSchemaVersionKey{}, core.DataStoreKey{}, "") + multistore := datastore.MultiStoreFrom(s) + reg := crdt.NewLWWRegister(multistore.Rootstore(), core.CollectionSchemaVersionKey{}, core.DataStoreKey{}, "") return NewMerkleClock(multistore.Headstore(), multistore.DAGstore(), core.HeadStoreKey{DocKey: "dockey", FieldId: "1"}, reg).(*MerkleClock) } func TestNewMerkleClock(t *testing.T) { s := newDS() - rw := datastore.AsDSReaderWriter(s) - multistore := datastore.MultiStoreFrom(rw) - reg := crdt.NewLWWRegister(rw, core.CollectionSchemaVersionKey{}, core.DataStoreKey{}, "") + multistore := datastore.MultiStoreFrom(s) + reg := crdt.NewLWWRegister(multistore.Rootstore(), core.CollectionSchemaVersionKey{}, core.DataStoreKey{}, "") clk := NewMerkleClock(multistore.Headstore(), multistore.DAGstore(), core.HeadStoreKey{}, reg).(*MerkleClock) if clk.headstore != multistore.Headstore() { diff --git a/merkle/crdt/composite.go b/merkle/crdt/composite.go index d851eb65bb..704c65fcd0 100644 --- a/merkle/crdt/composite.go +++ b/merkle/crdt/composite.go @@ -27,7 +27,7 @@ var ( compFactoryFn = MerkleCRDTFactory( func( mstore datastore.MultiStore, - schemaID core.CollectionSchemaVersionKey, + schemaRoot core.CollectionSchemaVersionKey, uCh events.UpdateChannel, fieldName string, ) MerkleCRDTInitFn { @@ -36,7 +36,7 @@ var ( mstore.Datastore(), mstore.Headstore(), mstore.DAGstore(), - schemaID, + schemaRoot, uCh, core.DataStoreKey{}, key, diff --git a/merkle/crdt/factory.go b/merkle/crdt/factory.go index 36ad681a9a..04dc3d5aef 100644 --- a/merkle/crdt/factory.go +++ b/merkle/crdt/factory.go @@ -137,6 +137,14 @@ func (factory Factory) Headstore() datastore.DSReaderWriter { return factory.multistore.Headstore() } +// Peerstore implements datastore.MultiStore and returns the current Peerstore. +func (factory Factory) Peerstore() datastore.DSBatching { + if factory.multistore == nil { + return nil + } + return factory.multistore.Peerstore() +} + // Head implements datastore.MultiStore and returns the current Headstore. 
func (factory Factory) Systemstore() datastore.DSReaderWriter { if factory.multistore == nil { diff --git a/merkle/crdt/factory_test.go b/merkle/crdt/factory_test.go index 15879ec2dd..10e2f5c672 100644 --- a/merkle/crdt/factory_test.go +++ b/merkle/crdt/factory_test.go @@ -25,8 +25,7 @@ import ( func newStores() datastore.MultiStore { root := ds.NewMapDatastore() - rw := datastore.AsDSReaderWriter(root) - return datastore.MultiStoreFrom(rw) + return datastore.MultiStoreFrom(root) } func TestNewBlankFactory(t *testing.T) { diff --git a/merkle/crdt/lwwreg.go b/merkle/crdt/lwwreg.go index 87b00e2151..796451c041 100644 --- a/merkle/crdt/lwwreg.go +++ b/merkle/crdt/lwwreg.go @@ -27,7 +27,7 @@ var ( lwwFactoryFn = MerkleCRDTFactory( func( mstore datastore.MultiStore, - schemaID core.CollectionSchemaVersionKey, + schemaRoot core.CollectionSchemaVersionKey, _ events.UpdateChannel, fieldName string, ) MerkleCRDTInitFn { @@ -36,7 +36,7 @@ var ( mstore.Datastore(), mstore.Headstore(), mstore.DAGstore(), - schemaID, + schemaRoot, core.DataStoreKey{}, key, fieldName, diff --git a/merkle/crdt/merklecrdt_test.go b/merkle/crdt/merklecrdt_test.go index 6093bf2d97..675fcfe38f 100644 --- a/merkle/crdt/merklecrdt_test.go +++ b/merkle/crdt/merklecrdt_test.go @@ -31,12 +31,11 @@ func newDS() ds.Datastore { func newTestBaseMerkleCRDT() (*baseMerkleCRDT, datastore.DSReaderWriter) { s := newDS() - rw := datastore.AsDSReaderWriter(s) - multistore := datastore.MultiStoreFrom(rw) + multistore := datastore.MultiStoreFrom(s) reg := corecrdt.NewLWWRegister(multistore.Datastore(), core.CollectionSchemaVersionKey{}, core.DataStoreKey{}, "") clk := clock.NewMerkleClock(multistore.Headstore(), multistore.DAGstore(), core.HeadStoreKey{}, reg) - return &baseMerkleCRDT{clock: clk, crdt: reg}, rw + return &baseMerkleCRDT{clock: clk, crdt: reg}, multistore.Rootstore() } func TestMerkleCRDTPublish(t *testing.T) { diff --git a/net/api/client/client.go b/net/api/client/client.go deleted file mode 100644 index 2ea92bd14c..0000000000 --- a/net/api/client/client.go +++ /dev/null @@ -1,169 +0,0 @@ -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "context" - "fmt" - - "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" - codec "github.com/planetscale/vtprotobuf/codec/grpc" - "google.golang.org/grpc" - "google.golang.org/grpc/encoding" - _ "google.golang.org/grpc/encoding/proto" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/errors" - pb "github.com/sourcenetwork/defradb/net/pb" -) - -func init() { - encoding.RegisterCodec(codec.Codec{}) -} - -type Client struct { - c pb.CollectionClient - conn *grpc.ClientConn -} - -// NewClient returns a new defra gRPC client connected to the target address. -func NewClient(target string, opts ...grpc.DialOption) (*Client, error) { - conn, err := grpc.Dial(target, opts...) 
- if err != nil { - return nil, err - } - - return &Client{ - c: pb.NewCollectionClient(conn), - conn: conn, - }, nil -} - -func (c *Client) Close() error { - return c.conn.Close() -} - -// SetReplicator sends a request to add a target replicator to the DB peer. -func (c *Client) SetReplicator( - ctx context.Context, - paddr ma.Multiaddr, - collections ...string, -) (peer.ID, error) { - if paddr == nil { - return "", errors.New("target address can't be empty") - } - resp, err := c.c.SetReplicator(ctx, &pb.SetReplicatorRequest{ - Collections: collections, - Addr: paddr.Bytes(), - }) - if err != nil { - return "", errors.Wrap("could not add replicator", err) - } - return peer.IDFromBytes(resp.PeerID) -} - -// DeleteReplicator sends a request to add a target replicator to the DB peer. -func (c *Client) DeleteReplicator( - ctx context.Context, - pid peer.ID, - collections ...string, -) error { - _, err := c.c.DeleteReplicator(ctx, &pb.DeleteReplicatorRequest{ - PeerID: []byte(pid), - }) - return err -} - -// GetAllReplicators sends a request to add a target replicator to the DB peer. -func (c *Client) GetAllReplicators( - ctx context.Context, -) ([]client.Replicator, error) { - resp, err := c.c.GetAllReplicators(ctx, &pb.GetAllReplicatorRequest{}) - if err != nil { - return nil, errors.Wrap("could not get replicators", err) - } - reps := []client.Replicator{} - for _, rep := range resp.Replicators { - addr, err := ma.NewMultiaddrBytes(rep.Info.Addrs) - if err != nil { - return nil, errors.WithStack(err) - } - - pid, err := peer.IDFromBytes(rep.Info.Id) - if err != nil { - return nil, errors.WithStack(err) - } - - reps = append(reps, client.Replicator{ - Info: peer.AddrInfo{ - ID: pid, - Addrs: []ma.Multiaddr{addr}, - }, - Schemas: rep.Schemas, - }) - } - return reps, nil -} - -// AddP2PCollections sends a request to add P2P collecctions to the stored list. -func (c *Client) AddP2PCollections( - ctx context.Context, - collections ...string, -) error { - resp, err := c.c.AddP2PCollections(ctx, &pb.AddP2PCollectionsRequest{ - Collections: collections, - }) - if err != nil { - return errors.Wrap("could not add P2P collection topics", err) - } - if resp.Err != "" { - return errors.New(fmt.Sprintf("could not add P2P collection topics: %s", resp)) - } - return nil -} - -// RemoveP2PCollections sends a request to remove P2P collecctions from the stored list. -func (c *Client) RemoveP2PCollections( - ctx context.Context, - collections ...string, -) error { - resp, err := c.c.RemoveP2PCollections(ctx, &pb.RemoveP2PCollectionsRequest{ - Collections: collections, - }) - if err != nil { - return errors.Wrap("could not remove P2P collection topics", err) - } - if resp.Err != "" { - return errors.New(fmt.Sprintf("could not remove P2P collection topics: %s", resp)) - } - return nil -} - -// RemoveP2PCollections sends a request to get all P2P collecctions from the stored list. 
-func (c *Client) GetAllP2PCollections( - ctx context.Context, -) ([]client.P2PCollection, error) { - resp, err := c.c.GetAllP2PCollections(ctx, &pb.GetAllP2PCollectionsRequest{}) - if err != nil { - return nil, errors.Wrap("could not get all P2P collection topics", err) - } - var collections []client.P2PCollection - for _, col := range resp.Collections { - collections = append(collections, client.P2PCollection{ - ID: col.Id, - Name: col.Name, - }) - } - return collections, nil -} diff --git a/net/api/pb/Makefile b/net/api/pb/Makefile deleted file mode 100644 index 62eef77354..0000000000 --- a/net/api/pb/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -PB = $(wildcard *.proto) -GO = $(PB:.proto=.pb.go) - -all: $(GO) - -%.pb.go: %.proto - protoc \ - --go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \ - --go-grpc_out=. --plugin protoc-gen-go-grpc="${GOBIN}/protoc-gen-go-grpc" \ - --go-vtproto_out=. --plugin protoc-gen-go-vtproto="${GOBIN}/protoc-gen-go-vtproto" \ - --go-vtproto_opt=features=marshal+unmarshal+size \ - $< - -clean: - rm -f *.pb.go - rm -f *pb_test.go - -.PHONY: clean \ No newline at end of file diff --git a/net/api/pb/api.pb.go b/net/api/pb/api.pb.go deleted file mode 100644 index ad48069b8f..0000000000 --- a/net/api/pb/api.pb.go +++ /dev/null @@ -1,1100 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.9 -// source: api.proto - -package api_pb - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type SetReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` - Addr []byte `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` -} - -func (x *SetReplicatorRequest) Reset() { - *x = SetReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetReplicatorRequest) ProtoMessage() {} - -func (x *SetReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[0] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetReplicatorRequest.ProtoReflect.Descriptor instead. 
-func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{0} -} - -func (x *SetReplicatorRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -func (x *SetReplicatorRequest) GetAddr() []byte { - if x != nil { - return x.Addr - } - return nil -} - -type SetReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *SetReplicatorReply) Reset() { - *x = SetReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetReplicatorReply) ProtoMessage() {} - -func (x *SetReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[1] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetReplicatorReply.ProtoReflect.Descriptor instead. -func (*SetReplicatorReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{1} -} - -func (x *SetReplicatorReply) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type DeleteReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *DeleteReplicatorRequest) Reset() { - *x = DeleteReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteReplicatorRequest) ProtoMessage() {} - -func (x *DeleteReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[2] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteReplicatorRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{2} -} - -func (x *DeleteReplicatorRequest) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type DeleteReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *DeleteReplicatorReply) Reset() { - *x = DeleteReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteReplicatorReply) ProtoMessage() {} - -func (x *DeleteReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[3] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteReplicatorReply.ProtoReflect.Descriptor instead. -func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{3} -} - -func (x *DeleteReplicatorReply) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type GetAllReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetAllReplicatorRequest) Reset() { - *x = GetAllReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorRequest) ProtoMessage() {} - -func (x *GetAllReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[4] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorRequest.ProtoReflect.Descriptor instead. 
-func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{4} -} - -type GetAllReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` -} - -func (x *GetAllReplicatorReply) Reset() { - *x = GetAllReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorReply) ProtoMessage() {} - -func (x *GetAllReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[5] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorReply.ProtoReflect.Descriptor instead. -func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{5} -} - -func (x *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { - if x != nil { - return x.Replicators - } - return nil -} - -type AddP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *AddP2PCollectionsRequest) Reset() { - *x = AddP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddP2PCollectionsRequest) ProtoMessage() {} - -func (x *AddP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[6] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
-func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{6} -} - -func (x *AddP2PCollectionsRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type AddP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` -} - -func (x *AddP2PCollectionsReply) Reset() { - *x = AddP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddP2PCollectionsReply) ProtoMessage() {} - -func (x *AddP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[7] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddP2PCollectionsReply.ProtoReflect.Descriptor instead. -func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{7} -} - -func (x *AddP2PCollectionsReply) GetErr() string { - if x != nil { - return x.Err - } - return "" -} - -type RemoveP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *RemoveP2PCollectionsRequest) Reset() { - *x = RemoveP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveP2PCollectionsRequest) ProtoMessage() {} - -func (x *RemoveP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[8] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
-func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{8} -} - -func (x *RemoveP2PCollectionsRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type RemoveP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` -} - -func (x *RemoveP2PCollectionsReply) Reset() { - *x = RemoveP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveP2PCollectionsReply) ProtoMessage() {} - -func (x *RemoveP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[9] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveP2PCollectionsReply.ProtoReflect.Descriptor instead. -func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{9} -} - -func (x *RemoveP2PCollectionsReply) GetErr() string { - if x != nil { - return x.Err - } - return "" -} - -type GetAllP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetAllP2PCollectionsRequest) Reset() { - *x = GetAllP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsRequest) ProtoMessage() {} - -func (x *GetAllP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[10] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsRequest.ProtoReflect.Descriptor instead. 
-func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{10} -} - -type GetAllP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *GetAllP2PCollectionsReply) Reset() { - *x = GetAllP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsReply) ProtoMessage() {} - -func (x *GetAllP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsReply.ProtoReflect.Descriptor instead. -func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{11} -} - -func (x *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection { - if x != nil { - return x.Collections - } - return nil -} - -type GetAllReplicatorReply_Replicators struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"` -} - -func (x *GetAllReplicatorReply_Replicators) Reset() { - *x = GetAllReplicatorReply_Replicators{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorReply_Replicators) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorReply_Replicators) ProtoMessage() {} - -func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorReply_Replicators.ProtoReflect.Descriptor instead. 
-func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{5, 0} -} - -func (x *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info { - if x != nil { - return x.Info - } - return nil -} - -func (x *GetAllReplicatorReply_Replicators) GetSchemas() []string { - if x != nil { - return x.Schemas - } - return nil -} - -type GetAllReplicatorReply_Replicators_Info struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"` -} - -func (x *GetAllReplicatorReply_Replicators_Info) Reset() { - *x = GetAllReplicatorReply_Replicators_Info{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorReply_Replicators_Info) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {} - -func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorReply_Replicators_Info.ProtoReflect.Descriptor instead. -func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{5, 0, 0} -} - -func (x *GetAllReplicatorReply_Replicators_Info) GetId() []byte { - if x != nil { - return x.Id - } - return nil -} - -func (x *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte { - if x != nil { - return x.Addrs - } - return nil -} - -type GetAllP2PCollectionsReply_Collection struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetAllP2PCollectionsReply_Collection) Reset() { - *x = GetAllP2PCollectionsReply_Collection{} - if protoimpl.UnsafeEnabled { - mi := &file_api_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsReply_Collection) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {} - -func (x *GetAllP2PCollectionsReply_Collection) ProtoReflect() protoreflect.Message { - mi := &file_api_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsReply_Collection.ProtoReflect.Descriptor instead. 
-func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) { - return file_api_proto_rawDescGZIP(), []int{11, 0} -} - -func (x *GetAllP2PCollectionsReply_Collection) GetId() string { - if x != nil { - return x.Id - } - return "" -} - -func (x *GetAllP2PCollectionsReply_Collection) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -var File_api_proto protoreflect.FileDescriptor - -var file_api_proto_rawDesc = []byte{ - 0x0a, 0x09, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x61, 0x70, 0x69, - 0x2e, 0x70, 0x62, 0x22, 0x4c, 0x0a, 0x14, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, - 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, - 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, - 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, - 0x72, 0x22, 0x2c, 0x0a, 0x12, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, - 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, - 0x31, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, - 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, - 0x49, 0x44, 0x22, 0x2f, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, - 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, - 0x72, 0x49, 0x44, 0x22, 0x19, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x80, - 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, - 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x6f, 0x72, 0x73, 0x1a, 0x99, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, - 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x42, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, - 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x49, - 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, - 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, - 0x6d, 0x61, 0x73, 0x1a, 0x2c, 0x0a, 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, - 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x02, 
0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, - 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, - 0x73, 0x22, 0x3c, 0x0a, 0x18, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, - 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, - 0x2a, 0x0a, 0x16, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x3f, 0x0a, 0x1b, 0x52, - 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, - 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a, 0x19, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1d, 0x0a, 0x1b, 0x47, - 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x19, 0x47, - 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, - 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, - 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, - 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x30, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xa0, 0x04, 0x0a, 0x07, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x4b, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, - 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, - 0x79, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, - 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x61, 0x70, 
0x69, 0x2e, 0x70, - 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x47, 0x65, 0x74, - 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x1f, - 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, - 0x1d, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, - 0x12, 0x57, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x41, - 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, - 0x2e, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x52, 0x65, 0x6d, - 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x12, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, - 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, - 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x47, - 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, - 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x61, 0x70, 0x69, 0x2e, 0x70, - 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, 0x5a, - 0x08, 0x2f, 0x3b, 0x61, 0x70, 0x69, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, -} - -var ( - file_api_proto_rawDescOnce sync.Once - file_api_proto_rawDescData = file_api_proto_rawDesc -) - -func file_api_proto_rawDescGZIP() []byte { - file_api_proto_rawDescOnce.Do(func() { - file_api_proto_rawDescData = protoimpl.X.CompressGZIP(file_api_proto_rawDescData) - }) - return file_api_proto_rawDescData -} - -var file_api_proto_msgTypes = make([]protoimpl.MessageInfo, 15) -var file_api_proto_goTypes = []interface{}{ - (*SetReplicatorRequest)(nil), // 0: api.pb.SetReplicatorRequest - (*SetReplicatorReply)(nil), // 1: api.pb.SetReplicatorReply - (*DeleteReplicatorRequest)(nil), // 2: api.pb.DeleteReplicatorRequest - (*DeleteReplicatorReply)(nil), // 3: api.pb.DeleteReplicatorReply - (*GetAllReplicatorRequest)(nil), // 4: api.pb.GetAllReplicatorRequest - (*GetAllReplicatorReply)(nil), // 5: api.pb.GetAllReplicatorReply - (*AddP2PCollectionsRequest)(nil), // 6: api.pb.AddP2PCollectionsRequest - (*AddP2PCollectionsReply)(nil), // 7: 
api.pb.AddP2PCollectionsReply - (*RemoveP2PCollectionsRequest)(nil), // 8: api.pb.RemoveP2PCollectionsRequest - (*RemoveP2PCollectionsReply)(nil), // 9: api.pb.RemoveP2PCollectionsReply - (*GetAllP2PCollectionsRequest)(nil), // 10: api.pb.GetAllP2PCollectionsRequest - (*GetAllP2PCollectionsReply)(nil), // 11: api.pb.GetAllP2PCollectionsReply - (*GetAllReplicatorReply_Replicators)(nil), // 12: api.pb.GetAllReplicatorReply.Replicators - (*GetAllReplicatorReply_Replicators_Info)(nil), // 13: api.pb.GetAllReplicatorReply.Replicators.Info - (*GetAllP2PCollectionsReply_Collection)(nil), // 14: api.pb.GetAllP2PCollectionsReply.Collection -} -var file_api_proto_depIdxs = []int32{ - 12, // 0: api.pb.GetAllReplicatorReply.replicators:type_name -> api.pb.GetAllReplicatorReply.Replicators - 14, // 1: api.pb.GetAllP2PCollectionsReply.collections:type_name -> api.pb.GetAllP2PCollectionsReply.Collection - 13, // 2: api.pb.GetAllReplicatorReply.Replicators.info:type_name -> api.pb.GetAllReplicatorReply.Replicators.Info - 0, // 3: api.pb.Service.SetReplicator:input_type -> api.pb.SetReplicatorRequest - 2, // 4: api.pb.Service.DeleteReplicator:input_type -> api.pb.DeleteReplicatorRequest - 4, // 5: api.pb.Service.GetAllReplicators:input_type -> api.pb.GetAllReplicatorRequest - 6, // 6: api.pb.Service.AddP2PCollections:input_type -> api.pb.AddP2PCollectionsRequest - 8, // 7: api.pb.Service.RemoveP2PCollections:input_type -> api.pb.RemoveP2PCollectionsRequest - 10, // 8: api.pb.Service.GetAllP2PCollections:input_type -> api.pb.GetAllP2PCollectionsRequest - 1, // 9: api.pb.Service.SetReplicator:output_type -> api.pb.SetReplicatorReply - 3, // 10: api.pb.Service.DeleteReplicator:output_type -> api.pb.DeleteReplicatorReply - 5, // 11: api.pb.Service.GetAllReplicators:output_type -> api.pb.GetAllReplicatorReply - 7, // 12: api.pb.Service.AddP2PCollections:output_type -> api.pb.AddP2PCollectionsReply - 9, // 13: api.pb.Service.RemoveP2PCollections:output_type -> api.pb.RemoveP2PCollectionsReply - 11, // 14: api.pb.Service.GetAllP2PCollections:output_type -> api.pb.GetAllP2PCollectionsReply - 9, // [9:15] is the sub-list for method output_type - 3, // [3:9] is the sub-list for method input_type - 3, // [3:3] is the sub-list for extension type_name - 3, // [3:3] is the sub-list for extension extendee - 0, // [0:3] is the sub-list for field type_name -} - -func init() { file_api_proto_init() } -func file_api_proto_init() { - if File_api_proto != nil { - return - } - if !protoimpl.UnsafeEnabled { - file_api_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - 
file_api_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply_Replicators); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply_Replicators_Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_api_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsReply_Collection); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_api_proto_rawDesc, - NumEnums: 0, - NumMessages: 15, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_api_proto_goTypes, - DependencyIndexes: file_api_proto_depIdxs, - MessageInfos: file_api_proto_msgTypes, - }.Build() - File_api_proto = out.File - file_api_proto_rawDesc = nil - file_api_proto_goTypes = nil - file_api_proto_depIdxs = nil -} diff --git a/net/api/pb/api.proto b/net/api/pb/api.proto deleted file mode 100644 index 367997c7af..0000000000 --- a/net/api/pb/api.proto +++ /dev/null @@ -1,82 +0,0 @@ 
-syntax = "proto3";
-package api.pb;
-
-option go_package = "/;api_pb";
-
-message SetReplicatorRequest {
-  repeated string collections = 1;
-  bytes addr = 2;
-}
-
-message SetReplicatorReply {
-  bytes peerID = 1;
-}
-
-message DeleteReplicatorRequest {
-  bytes peerID = 1;
-}
-
-message DeleteReplicatorReply {
-  bytes peerID = 1;
-}
-
-message GetAllReplicatorRequest {}
-
-message GetAllReplicatorReply {
-  message Replicators {
-    message Info {
-      bytes id = 1;
-      bytes addrs = 2;
-    }
-    Info info = 1;
-    repeated string schemas = 2;
-  }
-
-  repeated Replicators replicators = 1;
-
-}
-
-message AddP2PCollectionsRequest {
-  repeated string collections = 1;
-}
-
-message AddP2PCollectionsReply {
-  string err = 1;
-}
-
-message RemoveP2PCollectionsRequest {
-  repeated string collections = 1;
-}
-
-message RemoveP2PCollectionsReply {
-  string err = 1;
-}
-
-message GetAllP2PCollectionsRequest {}
-
-message GetAllP2PCollectionsReply {
-  message Collection {
-    string id = 1;
-    string name = 2;
-  }
-  repeated Collection collections = 1;
-}
-
-
-// Service is the peer-to-peer network API for document sync
-service Service {
-  // SetReplicator for this peer
-  rpc SetReplicator(SetReplicatorRequest) returns (SetReplicatorReply) {}
-
-  // DeleteReplicator for this peer
-  rpc DeleteReplicator(DeleteReplicatorRequest) returns (DeleteReplicatorReply) {}
-
-  // GetAllReplicators for this peer
-  rpc GetAllReplicators(GetAllReplicatorRequest) returns (GetAllReplicatorReply) {}
-
-  rpc AddP2PCollections(AddP2PCollectionsRequest) returns (AddP2PCollectionsReply) {}
-
-  rpc RemoveP2PCollections(RemoveP2PCollectionsRequest) returns (RemoveP2PCollectionsReply) {}
-
-  rpc GetAllP2PCollections(GetAllP2PCollectionsRequest) returns (GetAllP2PCollectionsReply) {}
-}
\ No newline at end of file
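// Illustrative sketch (not part of the deleted files): driving the Service
// defined in api.proto above through the generated Go client that follows.
// The dial address, collection ID, and import path are assumptions; only the
// api_pb identifiers come from the generated code in this diff.
package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	api_pb "github.com/sourcenetwork/defradb/net/api/pb" // assumed import path
)

func main() {
	// Dial the node's P2P API endpoint (address is hypothetical).
	conn, err := grpc.Dial("127.0.0.1:9161",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := api_pb.NewServiceClient(conn)
	reply, err := client.AddP2PCollections(context.Background(),
		&api_pb.AddP2PCollectionsRequest{
			Collections: []string{"bafybeifexamplecollectionid"}, // hypothetical ID
		})
	if err != nil {
		log.Fatal(err)
	}
	// The generated getters are nil-safe, so GetErr returns "" on an empty reply.
	if e := reply.GetErr(); e != "" {
		log.Fatal(e)
	}
}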
diff --git a/net/api/pb/api_grpc.pb.go b/net/api/pb/api_grpc.pb.go
deleted file mode 100644
index 5d1bc204d3..0000000000
--- a/net/api/pb/api_grpc.pb.go
+++ /dev/null
@@ -1,300 +0,0 @@
-// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
-// versions:
-// - protoc-gen-go-grpc v1.3.0
-// - protoc v3.21.9
-// source: api.proto
-
-package api_pb
-
-import (
-	context "context"
-	grpc "google.golang.org/grpc"
-	codes "google.golang.org/grpc/codes"
-	status "google.golang.org/grpc/status"
-)
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
-
-const (
-	Service_SetReplicator_FullMethodName        = "/api.pb.Service/SetReplicator"
-	Service_DeleteReplicator_FullMethodName     = "/api.pb.Service/DeleteReplicator"
-	Service_GetAllReplicators_FullMethodName    = "/api.pb.Service/GetAllReplicators"
-	Service_AddP2PCollections_FullMethodName    = "/api.pb.Service/AddP2PCollections"
-	Service_RemoveP2PCollections_FullMethodName = "/api.pb.Service/RemoveP2PCollections"
-	Service_GetAllP2PCollections_FullMethodName = "/api.pb.Service/GetAllP2PCollections"
-)
-
-// ServiceClient is the client API for Service service.
-//
-// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
-type ServiceClient interface {
-	// SetReplicator for this peer
-	SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error)
-	// DeleteReplicator for this peer
-	DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error)
-	// GetAllReplicators for this peer
-	GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error)
-	AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error)
-	RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error)
-	GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error)
-}
-
-type serviceClient struct {
-	cc grpc.ClientConnInterface
-}
-
-func NewServiceClient(cc grpc.ClientConnInterface) ServiceClient {
-	return &serviceClient{cc}
-}
-
-func (c *serviceClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) {
-	out := new(SetReplicatorReply)
-	err := c.cc.Invoke(ctx, Service_SetReplicator_FullMethodName, in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *serviceClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) {
-	out := new(DeleteReplicatorReply)
-	err := c.cc.Invoke(ctx, Service_DeleteReplicator_FullMethodName, in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *serviceClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) {
-	out := new(GetAllReplicatorReply)
-	err := c.cc.Invoke(ctx, Service_GetAllReplicators_FullMethodName, in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *serviceClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) {
-	out := new(AddP2PCollectionsReply)
-	err := c.cc.Invoke(ctx, Service_AddP2PCollections_FullMethodName, in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *serviceClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) {
-	out := new(RemoveP2PCollectionsReply)
-	err := c.cc.Invoke(ctx, Service_RemoveP2PCollections_FullMethodName, in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
-func (c *serviceClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) {
-	out := new(GetAllP2PCollectionsReply)
-	err := c.cc.Invoke(ctx, Service_GetAllP2PCollections_FullMethodName, in, out, opts...)
-	if err != nil {
-		return nil, err
-	}
-	return out, nil
-}
-
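// Illustrative sketch (not part of the deleted file): a server that embeds
// UnimplementedServiceServer (defined just below), so any RPC it does not
// override fails with codes.Unimplemented rather than breaking the build
// when new methods are added to the service. The struct and its backing
// data are hypothetical; RegisterServiceServer is the generated registrar.
type p2pService struct {
	UnimplementedServiceServer
	subscribed []*GetAllP2PCollectionsReply_Collection
}

func (s *p2pService) GetAllP2PCollections(
	ctx context.Context,
	_ *GetAllP2PCollectionsRequest,
) (*GetAllP2PCollectionsReply, error) {
	// A real implementation would read the node's subscribed collections here.
	return &GetAllP2PCollectionsReply{Collections: s.subscribed}, nil
}

// Wiring it up: RegisterServiceServer(grpcServer, &p2pService{}).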
-// ServiceServer is the server API for Service service.
-// All implementations must embed UnimplementedServiceServer
-// for forward compatibility
-type ServiceServer interface {
-	// SetReplicator for this peer
-	SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error)
-	// DeleteReplicator for this peer
-	DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error)
-	// GetAllReplicators for this peer
-	GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error)
-	AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error)
-	RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error)
-	GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error)
-	mustEmbedUnimplementedServiceServer()
-}
-
-// UnimplementedServiceServer must be embedded to have forward compatible implementations.
-type UnimplementedServiceServer struct {
-}
-
-func (UnimplementedServiceServer) SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented")
-}
-func (UnimplementedServiceServer) DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented")
-}
-func (UnimplementedServiceServer) GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented")
-}
-func (UnimplementedServiceServer) AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented")
-}
-func (UnimplementedServiceServer) RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented")
-}
-func (UnimplementedServiceServer) GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) {
-	return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented")
-}
-func (UnimplementedServiceServer) mustEmbedUnimplementedServiceServer() {}
-
-// UnsafeServiceServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to ServiceServer will
-// result in compilation errors.
-type UnsafeServiceServer interface { - mustEmbedUnimplementedServiceServer() -} - -func RegisterServiceServer(s grpc.ServiceRegistrar, srv ServiceServer) { - s.RegisterService(&Service_ServiceDesc, srv) -} - -func _Service_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).SetReplicator(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_SetReplicator_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).SetReplicator(ctx, req.(*SetReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Service_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).DeleteReplicator(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_DeleteReplicator_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Service_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAllReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetAllReplicators(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_GetAllReplicators_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Service_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).AddP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_AddP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Service_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).RemoveP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_RemoveP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest)) - } - return 
interceptor(ctx, in, info, handler) -} - -func _Service_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAllP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ServiceServer).GetAllP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Service_GetAllP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ServiceServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// Service_ServiceDesc is the grpc.ServiceDesc for Service service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Service_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "api.pb.Service", - HandlerType: (*ServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SetReplicator", - Handler: _Service_SetReplicator_Handler, - }, - { - MethodName: "DeleteReplicator", - Handler: _Service_DeleteReplicator_Handler, - }, - { - MethodName: "GetAllReplicators", - Handler: _Service_GetAllReplicators_Handler, - }, - { - MethodName: "AddP2PCollections", - Handler: _Service_AddP2PCollections_Handler, - }, - { - MethodName: "RemoveP2PCollections", - Handler: _Service_RemoveP2PCollections_Handler, - }, - { - MethodName: "GetAllP2PCollections", - Handler: _Service_GetAllP2PCollections_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "api.proto", -} diff --git a/net/api/pb/api_vtproto.pb.go b/net/api/pb/api_vtproto.pb.go deleted file mode 100644 index e4ddfb9bcb..0000000000 --- a/net/api/pb/api_vtproto.pb.go +++ /dev/null @@ -1,2316 +0,0 @@ -// Code generated by protoc-gen-go-vtproto. DO NOT EDIT. -// protoc-gen-go-vtproto version: v0.4.0 -// source: api.proto - -package api_pb - -import ( - fmt "fmt" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - io "io" - bits "math/bits" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -func (m *SetReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *SetReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Addr) > 0 { - i -= len(m.Addr) - copy(dAtA[i:], m.Addr) - i = encodeVarint(dAtA, i, uint64(len(m.Addr))) - i-- - dAtA[i] = 0x12 - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SetReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *SetReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *SetReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *DeleteReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *DeleteReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - 
copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorReply_Replicators_Info) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply_Replicators_Info) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Addrs) > 0 { - i -= len(m.Addrs) - copy(dAtA[i:], m.Addrs) - i = encodeVarint(dAtA, i, uint64(len(m.Addrs))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarint(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorReply_Replicators) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply_Replicators) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Schemas) > 0 { - for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Schemas[iNdEx]) - copy(dAtA[i:], m.Schemas[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Schemas[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if m.Info != nil { - size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return 
m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Replicators) > 0 { - for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Replicators[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AddP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *AddP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AddP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *AddP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarint(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return 
nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarint(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarint(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Collections[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - 
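// Illustrative round trip (not part of the deleted file): how the vtproto
// fast path above is exercised. MarshalVT sizes the message once with SizeVT,
// then MarshalToSizedBufferVT fills the buffer back-to-front, so encoding
// costs a single allocation and no reflection. The field values here are
// hypothetical; the types and methods are the generated ones in this diff.
//
//	req := &SetReplicatorRequest{
//		Collections: []string{"Books"},                 // hypothetical collection
//		Addr:        []byte("/ip4/127.0.0.1/tcp/9171"), // hypothetical peer multiaddr
//	}
//	buf, err := req.MarshalVT()
//	if err != nil {
//		// handle error
//	}
//	var out SetReplicatorRequest
//	if err := out.UnmarshalVT(buf); err != nil {
//		// handle error
//	}
//	// out now matches req; any unknown fields carried in buf are preserved too.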
-func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *SetReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.Addr) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *SetReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *DeleteReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *DeleteReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply_Replicators_Info) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Addrs) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply_Replicators) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Info != nil { - l = m.Info.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if len(m.Schemas) > 0 { - for _, s := range m.Schemas { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Replicators) > 0 { - for _, e := range m.Replicators { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *AddP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *AddP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *RemoveP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *RemoveP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsReply_Collection) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = 
len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, e := range m.Collections { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) - if m.Addr == nil { - m.Addr = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
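			// Unknown field numbers are not discarded: skip() measures the field
			// and its raw bytes are appended to unknownFields, so a later
			// MarshalVT writes them back out unchanged.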
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) - if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) - if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) - if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) - if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...) - if m.Addrs == nil { - m.Addrs = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
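Reviewer note: every UnmarshalVT above follows the same shape: read a uvarint tag, split it into a field number (tag >> 3) and a wire type (tag & 0x7), then dispatch on the field number. A compact sketch of that loop for a single length-delimited (wire type 2) string field, stdlib only and illustrative rather than generated:

package main

import (
	"encoding/binary"
	"fmt"
	"io"
)

// decodeField reads one tag/value pair laid out like field 1 of the
// messages above: uvarint tag, uvarint length, then the payload.
func decodeField(data []byte) (string, error) {
	tag, n := binary.Uvarint(data)
	if n <= 0 {
		return "", io.ErrUnexpectedEOF
	}
	fieldNum, wireType := tag>>3, tag&0x7
	if fieldNum != 1 || wireType != 2 {
		return "", fmt.Errorf("unexpected tag: field %d, wire type %d", fieldNum, wireType)
	}
	length, m := binary.Uvarint(data[n:])
	if m <= 0 || int(length) > len(data[n+m:]) {
		return "", io.ErrUnexpectedEOF
	}
	return string(data[n+m : n+m+int(length)]), nil
}

func main() {
	// 0x0a = (1 << 3) | 2: field 1, wire type 2; 0x02 = payload length.
	s, err := decodeField([]byte{0x0a, 0x02, 'h', 'i'})
	fmt.Println(s, err) // hi <nil>
}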
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Info == nil { - m.Info = &GetAllReplicatorReply_Replicators_Info{} - } - if err := m.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{}) - if err := m.Replicators[len(m.Replicators)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
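Reviewer note: repeated embedded messages (Replicators here, Collections further down) are decoded by appending a zero-value element and then unmarshaling into the slot just added, which fills the element in place instead of building it separately and copying. A small sketch of that append-then-fill pattern, with a stand-in for UnmarshalVT:

package main

import "fmt"

type item struct{ name string }

// unmarshalInto fills i from raw; it stands in for UnmarshalVT in the
// generated code above.
func (i *item) unmarshalInto(raw string) error {
	i.name = raw
	return nil
}

func main() {
	var items []*item
	for _, raw := range []string{"a", "b"} {
		// Append the placeholder first, then unmarshal into the final
		// slot, exactly as m.Replicators is populated above.
		items = append(items, &item{})
		if err := items[len(items)-1].unmarshalInto(raw); err != nil {
			panic(err)
		}
	}
	fmt.Println(items[0].name, items[1].name) // a b
}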
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
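Reviewer note: the repeated Collections field above appends one string per occurrence of tag 1 because proto3 encodes a repeated string as the same tag/value pair written once per element. A hand-rolled round trip of that layout (stdlib only, for illustration):

package main

import (
	"encoding/binary"
	"fmt"
)

// appendStringField writes one occurrence of a length-delimited string
// field with the given field number, matching the layout decoded above.
func appendStringField(buf []byte, fieldNum int, s string) []byte {
	buf = binary.AppendUvarint(buf, uint64(fieldNum)<<3|2) // wire type 2
	buf = binary.AppendUvarint(buf, uint64(len(s)))
	return append(buf, s...)
}

func main() {
	// A repeated string field is just the same field written repeatedly.
	var buf []byte
	for _, c := range []string{"Users", "Books"} {
		buf = appendStringField(buf, 1, c)
	}
	// Decoding appends one element per tag occurrence, mirroring
	// m.Collections = append(m.Collections, ...) in the code above.
	var collections []string
	for i := 0; i < len(buf); {
		tag, n := binary.Uvarint(buf[i:])
		length, m := binary.Uvarint(buf[i+n:])
		_ = tag // always field 1, wire type 2 in this sketch
		start := i + n + m
		collections = append(collections, string(buf[start:start+int(length)]))
		i = start + int(length)
	}
	fmt.Println(collections) // [Users Books]
}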
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Err = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, 
dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Err = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) - if err := m.Collections[len(m.Collections)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} - -func skip(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - depth := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - case 1: - iNdEx += 8 - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflow - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if length < 0 { - return 0, ErrInvalidLength - } - iNdEx += length - case 3: - depth++ - case 4: - if depth == 0 { - return 0, ErrUnexpectedEndOfGroup - } - depth-- - case 5: - iNdEx += 4 - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - if iNdEx < 0 { - return 0, ErrInvalidLength - } - if depth == 0 { - return iNdEx, nil - } - } - return 0, io.ErrUnexpectedEOF -} - -var ( - ErrInvalidLength = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflow = fmt.Errorf("proto: integer overflow") - ErrUnexpectedEndOfGroup = fmt.Errorf("proto: unexpected end of group") -) diff --git a/net/client.go b/net/client.go index e38df0ed54..947495c5e1 100644 --- a/net/client.go +++ b/net/client.go @@ -38,13 +38,13 @@ func (s *server) pushLog(ctx context.Context, evt events.Update, pid peer.ID) er "Preparing pushLog request", logging.NewKV("DocKey", evt.DocKey), logging.NewKV("CID", evt.Cid), - logging.NewKV("SchemaId", evt.SchemaID)) + logging.NewKV("SchemaRoot", evt.SchemaRoot)) body := &pb.PushLogRequest_Body{ - DocKey: []byte(evt.DocKey), - Cid: evt.Cid.Bytes(), - SchemaID: []byte(evt.SchemaID), - Creator: s.peer.host.ID().String(), + DocKey: []byte(evt.DocKey), + Cid: evt.Cid.Bytes(), + SchemaRoot: []byte(evt.SchemaRoot), + Creator: s.peer.host.ID().String(), Log: &pb.Document_Log{ Block: evt.Block.RawData(), }, diff --git a/net/client_test.go b/net/client_test.go index e28c543175..a390485e25 100644 --- a/net/client_test.go +++ b/net/client_test.go @@ -15,7 +15,6 @@ import ( "testing" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" "google.golang.org/grpc" @@ -40,11 +39,11 @@ func TestPushlogWithDialFailure(t *testing.T) { ) err = n.server.pushLog(ctx, events.Update{ - DocKey: doc.Key().String(), - Cid: cid, - SchemaID: "test", - Block: &EmptyNode{}, - Priority: 1, + DocKey: doc.Key().String(), + Cid: cid, + SchemaRoot: "test", + Block: &EmptyNode{}, + Priority: 1, }, peer.ID("some-peer-id")) require.Contains(t, err.Error(), "no transport security set") } @@ -60,11 +59,11 @@ func TestPushlogWithInvalidPeerID(t *testing.T) { require.NoError(t, err) err = n.server.pushLog(ctx, events.Update{ - DocKey: doc.Key().String(), - Cid: cid, - SchemaID: "test", - Block: &EmptyNode{}, - Priority: 1, + DocKey: doc.Key().String(), + Cid: cid, + SchemaRoot: "test", + Block: &EmptyNode{}, + Priority: 1, }, peer.ID("some-peer-id")) require.Contains(t, err.Error(), "failed to parse peer ID") } @@ -76,12 
+75,7 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { _, n2 := newTestNode(ctx, t) n2.Start() - err := n1.host.Connect(ctx, peer.AddrInfo{ - ID: n2.PeerID(), - Addrs: []ma.Multiaddr{ - n2.host.Addrs()[0], - }, - }) + err := n1.host.Connect(ctx, n2.PeerInfo()) require.NoError(t, err) _, err = n1.db.AddSchema(ctx, `type User { @@ -111,11 +105,11 @@ func TestPushlogW_WithValidPeerID_NoError(t *testing.T) { require.NoError(t, err) err = n1.server.pushLog(ctx, events.Update{ - DocKey: doc.Key().String(), - Cid: cid, - SchemaID: col.SchemaID(), - Block: &EmptyNode{}, - Priority: 1, - }, n2.PeerID()) + DocKey: doc.Key().String(), + Cid: cid, + SchemaRoot: col.SchemaRoot(), + Block: &EmptyNode{}, + Priority: 1, + }, n2.PeerInfo().ID) require.NoError(t, err) } diff --git a/net/config.go b/net/config.go index 28fd73f25e..4c33dc02a6 100644 --- a/net/config.go +++ b/net/config.go @@ -16,6 +16,7 @@ import ( "time" cconnmgr "github.com/libp2p/go-libp2p/core/connmgr" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/libp2p/go-libp2p/p2p/net/connmgr" ma "github.com/multiformats/go-multiaddr" "google.golang.org/grpc" @@ -26,8 +27,7 @@ import ( // Options is the node options. type Options struct { ListenAddrs []ma.Multiaddr - TCPAddr ma.Multiaddr - DataPath string + PrivateKey crypto.PrivKey EnablePubSub bool EnableRelay bool GRPCServerOptions []grpc.ServerOption @@ -68,13 +68,8 @@ func WithConfig(cfg *config.Config) NodeOpt { if err != nil { return err } - err = WithListenTCPAddrString(cfg.Net.TCPAddress)(opt) - if err != nil { - return err - } opt.EnableRelay = cfg.Net.RelayEnabled opt.EnablePubSub = cfg.Net.PubSubEnabled - opt.DataPath = cfg.Datastore.Badger.Path opt.ConnManager, err = NewConnManager(100, 400, time.Second*20) if err != nil { return err @@ -83,10 +78,10 @@ func WithConfig(cfg *config.Config) NodeOpt { } } -// DataPath sets the data path. -func WithDataPath(path string) NodeOpt { +// WithPrivateKey sets the p2p host private key. +func WithPrivateKey(priv crypto.PrivKey) NodeOpt { return func(opt *Options) error { - opt.DataPath = path + opt.PrivateKey = priv return nil } } @@ -121,18 +116,6 @@ func WithListenP2PAddrStrings(addrs ...string) NodeOpt { } } -// ListenTCPAddrString sets the TCP address to listen on, as Multiaddr. -func WithListenTCPAddrString(addr string) NodeOpt { - return func(opt *Options) error { - a, err := ma.NewMultiaddr(addr) - if err != nil { - return err - } - opt.TCPAddr = a - return nil - } -} - // ListenAddrs sets the address to listen on given as MultiAddr(s). 
func WithListenAddrs(addrs ...ma.Multiaddr) NodeOpt { return func(opt *Options) error { diff --git a/net/config_test.go b/net/config_test.go index bffc19aead..6f306c29ed 100644 --- a/net/config_test.go +++ b/net/config_test.go @@ -14,6 +14,7 @@ import ( "testing" "time" + "github.com/libp2p/go-libp2p/core/crypto" ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/require" @@ -54,23 +55,14 @@ func TestWithConfigWithP2PAddressError(t *testing.T) { require.Contains(t, err.Error(), "failed to parse multiaddr") } -func TestWithConfigWitTCPAddressError(t *testing.T) { - cfg := config.Config{ - Net: &config.NetConfig{ - P2PAddress: "/ip4/0.0.0.0/tcp/9999", - TCPAddress: "/willerror/0.0.0.0/tcp/9999", - }, - } - err := WithConfig(&cfg)(&Options{}) - require.Contains(t, err.Error(), "failed to parse multiaddr") -} +func TestWithPrivateKey(t *testing.T) { + key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) + require.NoError(t, err) -func TestWithDataPath(t *testing.T) { - path := "test/path" - opt, err := NewMergedOptions(WithDataPath(path)) + opt, err := NewMergedOptions(WithPrivateKey(key)) require.NoError(t, err) require.NotNil(t, opt) - require.Equal(t, path, opt.DataPath) + require.Equal(t, key, opt.PrivateKey) } func TestWithPubSub(t *testing.T) { @@ -101,20 +93,6 @@ func TestWithListenP2PAddrStrings(t *testing.T) { require.Equal(t, addr, opt.ListenAddrs[0].String()) } -func TestWithListenTCPAddrStringWithError(t *testing.T) { - addr := "/willerror/0.0.0.0/tcp/9999" - _, err := NewMergedOptions(WithListenTCPAddrString(addr)) - require.Contains(t, err.Error(), "failed to parse multiaddr") -} - -func TestWithListenTCPAddrString(t *testing.T) { - addr := "/ip4/0.0.0.0/tcp/9999" - opt, err := NewMergedOptions(WithListenTCPAddrString(addr)) - require.NoError(t, err) - require.NotNil(t, opt) - require.Equal(t, addr, opt.TCPAddr.String()) -} - func TestWithListenAddrs(t *testing.T) { addr := "/ip4/0.0.0.0/tcp/9999" a, err := ma.NewMultiaddr(addr) diff --git a/net/dag.go b/net/dag.go index 2d49790f90..1760864db4 100644 --- a/net/dag.go +++ b/net/dag.go @@ -20,9 +20,6 @@ import ( "github.com/ipfs/go-cid" ipld "github.com/ipfs/go-ipld-format" - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/logging" ) @@ -50,17 +47,10 @@ type SessionDAGSyncer interface { } type dagJob struct { - session *sync.WaitGroup // A waitgroup to wait for all related jobs to conclude - nodeGetter ipld.NodeGetter // a node getter to use - node ipld.Node // the current ipld Node - - collection client.Collection // collection our document belongs to - dsKey core.DataStoreKey // datastore key of our document - fieldName string // field of the subgraph our node belongs to - - // Transaction common to a pushlog event. It is used to pass it along to processLog - // and handleChildBlocks within the dagWorker. 
- txn datastore.Txn + session *sync.WaitGroup // A waitgroup to wait for all related jobs to conclude + bp *blockProcessor // the block processor to use + cid cid.Cid // the cid of the block to fetch from the P2P network + isComposite bool // whether this is a composite block // OLD FIELDS // root cid.Cid // the root of the branch we are walking down @@ -87,13 +77,13 @@ func (p *Peer) sendJobWorker() { return case newJob := <-p.sendJobs: - jobs, ok := docWorkerQueue[newJob.dsKey.DocKey] + jobs, ok := docWorkerQueue[newJob.bp.dsKey.DocKey] if !ok { jobs = make(chan *dagJob, numWorkers) for i := 0; i < numWorkers; i++ { go p.dagWorker(jobs) } - docWorkerQueue[newJob.dsKey.DocKey] = jobs + docWorkerQueue[newJob.bp.dsKey.DocKey] = jobs } jobs <- newJob @@ -113,8 +103,8 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { log.Debug( p.ctx, "Starting new job from DAG queue", - logging.NewKV("Datastore Key", job.dsKey), - logging.NewKV("CID", job.node.Cid()), + logging.NewKV("Datastore Key", job.bp.dsKey), + logging.NewKV("CID", job.cid), ) select { @@ -125,44 +115,25 @@ func (p *Peer) dagWorker(jobs chan *dagJob) { default: } - children, err := p.processLog( - p.ctx, - job.txn, - job.collection, - job.dsKey, - job.fieldName, - job.node, - job.nodeGetter, - true, - ) - if err != nil { - log.ErrorE( - p.ctx, - "Error processing log", - err, - logging.NewKV("Datastore key", job.dsKey), - logging.NewKV("CID", job.node.Cid()), - ) - job.session.Done() - continue - } - - if len(children) == 0 { - job.session.Done() - continue - } - go func(j *dagJob) { - p.handleChildBlocks( - j.session, - j.txn, - j.collection, - j.dsKey, - j.fieldName, - j.node, - children, - j.nodeGetter, - ) + if j.bp.getter != nil && j.cid.Defined() { + cNode, err := j.bp.getter.Get(p.ctx, j.cid) + if err != nil { + log.ErrorE(p.ctx, "Failed to get node", err, logging.NewKV("CID", j.cid)) + j.session.Done() + return + } + err = j.bp.processRemoteBlock( + p.ctx, + j.session, + cNode, + j.isComposite, + ) + if err != nil { + log.ErrorE(p.ctx, "Failed to process remote block", err, logging.NewKV("CID", j.cid)) + } + } + p.queuedChildren.Remove(j.cid) j.session.Done() }(job) } diff --git a/net/dag_test.go b/net/dag_test.go index d0e9a18ce7..6f0145b0ae 100644 --- a/net/dag_test.go +++ b/net/dag_test.go @@ -18,14 +18,13 @@ import ( dag "github.com/ipfs/boxo/ipld/merkledag" "github.com/ipfs/go-cid" - format "github.com/ipfs/go-ipld-format" ipld "github.com/ipfs/go-ipld-format" mh "github.com/multiformats/go-multihash" "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/core/crdt" + "github.com/sourcenetwork/defradb/merkle/clock" netutils "github.com/sourcenetwork/defradb/net/utils" ) @@ -39,52 +38,7 @@ func TestSendJobWorker_ExitOnContextClose_NoError(t *testing.T) { n.sendJobWorker() close(done) }() - err := n.Close() - require.NoError(t, err) - select { - case <-done: - case <-time.After(timeout): - t.Error("failed to close sendJobWorker") - } -} - -func TestSendJobWorker_WithNewJobWithClosePriorToProcessing_NoError(t *testing.T) { - ctx := context.Background() - db, n := newTestNode(ctx, t) - done := make(chan struct{}) - go func() { - n.sendJobWorker() - close(done) - }() - _, err := db.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - - col, err := db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) - 
require.NoError(t, err) - dsKey := core.DataStoreKeyFromDocKey(doc.Key()) - - txn, err := db.NewTxn(ctx, false) - require.NoError(t, err) - - wg := sync.WaitGroup{} - wg.Add(1) - - n.sendJobs <- &dagJob{ - session: &wg, - node: &EmptyNode{}, - collection: col, - dsKey: dsKey, - txn: txn, - } - - err = n.Close() - require.NoError(t, err) + n.Close() select { case <-done: case <-time.After(timeout): @@ -106,9 +60,6 @@ func TestSendJobWorker_WithNewJob_NoError(t *testing.T) { }`) require.NoError(t, err) - col, err := db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) require.NoError(t, err) dsKey := core.DataStoreKeyFromDocKey(doc.Key()) @@ -120,16 +71,15 @@ func TestSendJobWorker_WithNewJob_NoError(t *testing.T) { wg.Add(1) n.sendJobs <- &dagJob{ - session: &wg, - node: &EmptyNode{}, - collection: col, - dsKey: dsKey, - txn: txn, + session: &wg, + bp: &blockProcessor{ + dsKey: dsKey, + txn: txn, + }, } // Give the jobworker time to process the job. time.Sleep(100 * time.Microsecond) - err = n.Close() - require.NoError(t, err) + n.Close() select { case <-done: case <-time.After(timeout): @@ -151,9 +101,6 @@ func TestSendJobWorker_WithCloseJob_NoError(t *testing.T) { }`) require.NoError(t, err) - col, err := db.GetCollectionByName(ctx, "User") - require.NoError(t, err) - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) require.NoError(t, err) dsKey := core.DataStoreKeyFromDocKey(doc.Key()) @@ -165,17 +112,16 @@ func TestSendJobWorker_WithCloseJob_NoError(t *testing.T) { wg.Add(1) n.sendJobs <- &dagJob{ - session: &wg, - node: &EmptyNode{}, - collection: col, - dsKey: dsKey, - txn: txn, + session: &wg, + bp: &blockProcessor{ + dsKey: dsKey, + txn: txn, + }, } n.closeJob <- dsKey.DocKey - err = n.Close() - require.NoError(t, err) + n.Close() select { case <-done: case <-time.After(timeout): @@ -183,15 +129,19 @@ func TestSendJobWorker_WithCloseJob_NoError(t *testing.T) { } } -func TestSendJobWorker_WithPeerAndNoChildren_NoError(t *testing.T) { +func TestSendJobWorker_WithPeer_NoError(t *testing.T) { ctx := context.Background() db1, n1 := newTestNode(ctx, t) db2, n2 := newTestNode(ctx, t) addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) require.NoError(t, err) - n2.Boostrap(addrs) + n2.Bootstrap(addrs) + err = n1.WaitForPeerConnectionEvent(n2.PeerID()) + require.NoError(t, err) + err = n2.WaitForPeerConnectionEvent(n1.PeerID()) + require.NoError(t, err) done := make(chan struct{}) go func() { n2.sendJobWorker() @@ -219,138 +169,47 @@ func TestSendJobWorker_WithPeerAndNoChildren_NoError(t *testing.T) { err = col.Create(ctx, doc) require.NoError(t, err) - txn, err := db2.NewTxn(ctx, false) + txn1, _ := db1.NewTxn(ctx, false) + heads, _, err := clock.NewHeadSet(txn1.Headstore(), dsKey.ToHeadStoreKey().WithFieldId(core.COMPOSITE_NAMESPACE)).List(ctx) + require.NoError(t, err) + txn1.Discard(ctx) + + txn2, err := db2.NewTxn(ctx, false) require.NoError(t, err) wg := sync.WaitGroup{} wg.Add(1) - delta := &crdt.CompositeDAGDelta{ - SchemaVersionID: col.Schema().VersionID, - Priority: 1, - DocKey: doc.Key().Bytes(), - } - - node, err := makeNode(delta, []cid.Cid{}) - require.NoError(t, err) - - var getter format.NodeGetter = n2.Peer.newDAGSyncerTxn(txn) + var getter ipld.NodeGetter = n2.Peer.newDAGSyncerTxn(txn2) if sessionMaker, ok := getter.(SessionDAGSyncer); ok { log.Debug(ctx, "Upgrading DAGSyncer with a session") getter = 
sessionMaker.Session(ctx) } n2.sendJobs <- &dagJob{ - session: &wg, - nodeGetter: getter, - node: node, - collection: col, - dsKey: dsKey, - txn: txn, + bp: newBlockProcessor(n2.Peer, txn2, col, dsKey, getter), + session: &wg, + cid: heads[0], + isComposite: true, } - // Give the jobworker time to process the job. - time.Sleep(100 * time.Microsecond) - err = n1.Close() - require.NoError(t, err) - err = n2.Close() - require.NoError(t, err) - select { - case <-done: - case <-time.After(timeout): - t.Error("failed to close sendJobWorker") - } -} - -func TestSendJobWorker_WithPeerAndChildren_NoError(t *testing.T) { - ctx := context.Background() - db1, n1 := newTestNode(ctx, t) - db2, n2 := newTestNode(ctx, t) - - addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) - require.NoError(t, err) - n2.Boostrap(addrs) - - done := make(chan struct{}) - go func() { - n2.sendJobWorker() - close(done) - }() - - _, err = db1.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) - _, err = db2.AddSchema(ctx, `type User { - name: String - age: Int - }`) - require.NoError(t, err) + wg.Wait() - col, err := db1.GetCollectionByName(ctx, "User") + err = txn2.Commit(ctx) require.NoError(t, err) - doc, err := client.NewDocFromJSON([]byte(`{"name": "John", "age": 30}`)) + block, err := n1.db.Blockstore().Get(ctx, heads[0]) require.NoError(t, err) - dsKey := core.DataStoreKeyFromDocKey(doc.Key()) - - err = col.Create(ctx, doc) - require.NoError(t, err) - - txn, err := db2.NewTxn(ctx, false) + nd, err := dag.DecodeProtobufBlock(block) require.NoError(t, err) - wg := sync.WaitGroup{} - wg.Add(1) - - links := []core.DAGLink{} - for k := range doc.Fields() { - delta := &crdt.LWWRegDelta{ - SchemaVersionID: col.Schema().VersionID, - Priority: 1, - DocKey: doc.Key().Bytes(), - FieldName: k, - } - - node, err := makeNode(delta, []cid.Cid{}) + for _, link := range nd.Links() { + exists, err := n2.db.Blockstore().Has(ctx, link.Cid) require.NoError(t, err) - - links = append(links, core.DAGLink{ - Name: k, - Cid: node.Cid(), - }) - } - - delta := &crdt.CompositeDAGDelta{ - SchemaVersionID: col.Schema().VersionID, - Priority: 1, - DocKey: doc.Key().Bytes(), - SubDAGs: links, - } - - node, err := makeNode(delta, []cid.Cid{}) - require.NoError(t, err) - - var getter format.NodeGetter = n2.Peer.newDAGSyncerTxn(txn) - if sessionMaker, ok := getter.(SessionDAGSyncer); ok { - log.Debug(ctx, "Upgrading DAGSyncer with a session") - getter = sessionMaker.Session(ctx) + require.True(t, exists) } - n2.sendJobs <- &dagJob{ - session: &wg, - nodeGetter: getter, - node: node, - collection: col, - dsKey: dsKey, - txn: txn, - } - // Give the jobworker time to process the job. - time.Sleep(100 * time.Microsecond) - err = n1.Close() - require.NoError(t, err) - err = n2.Close() - require.NoError(t, err) + n1.Close() + n2.Close() select { case <-done: case <-time.After(timeout): diff --git a/net/dialer.go b/net/dialer.go index 16ac03d03f..3cd12be7ff 100644 --- a/net/dialer.go +++ b/net/dialer.go @@ -48,7 +48,7 @@ func (s *server) dial(peerID libpeer.ID) (pb.ServiceClient, error) { } ctx, cancel := context.WithTimeout(context.Background(), DialTimeout) defer cancel() - conn, err := grpc.DialContext(ctx, peerID.Pretty(), s.opts...) + conn, err := grpc.DialContext(ctx, peerID.String(), s.opts...) 
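Reviewer note: the dialer change above swaps the deprecated peerID.Pretty() for peerID.String(); in current go-libp2p both return the same base58 text form, and String is the supported spelling, so the gRPC dial target is unchanged. A tiny sketch (the peer ID value is borrowed from the test fixtures later in this diff and is illustrative only):

package main

import (
	"fmt"

	"github.com/libp2p/go-libp2p/core/peer"
)

func main() {
	// Decode an Ed25519 peer ID from its text form.
	id, err := peer.Decode("12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci4")
	if err != nil {
		panic(err)
	}
	// String() is the non-deprecated replacement for Pretty() and
	// yields the same text used as the dial target in dialer.go above.
	fmt.Println(id.String())
}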
if err != nil { return nil, err } diff --git a/net/dialer_test.go b/net/dialer_test.go index 5e01b2384f..d092602490 100644 --- a/net/dialer_test.go +++ b/net/dialer_test.go @@ -27,23 +27,19 @@ func TestDial_WithConnectedPeer_NoError(t *testing.T) { ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) _, err = n1.server.dial(n2.PeerID()) require.NoError(t, err) } @@ -55,23 +51,19 @@ func TestDial_WithConnectedPeerAndSecondConnection_NoError(t *testing.T) { ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) _, err = n1.server.dial(n2.PeerID()) require.NoError(t, err) @@ -86,23 +78,19 @@ func TestDial_WithConnectedPeerAndSecondConnectionWithConnectionShutdown_Closing ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) assert.NoError(t, err) addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) _, err = n1.server.dial(n2.PeerID()) require.NoError(t, err) diff --git a/net/errors.go b/net/errors.go index 3f8d4926c5..e9ac8fc748 100644 --- a/net/errors.go +++ b/net/errors.go @@ -13,6 +13,8 @@ package net import ( "fmt" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/defradb/errors" ) @@ -21,6 +23,9 @@ const ( errFailedToGetDockey = "failed to get DocKey from broadcast message" errPublishingToDockeyTopic = "can't publish log %s for dockey %s" errPublishingToSchemaTopic = "can't publish log %s for schema %s" + errReplicatorExists = "replicator already exists for %s with peerID %s" + errReplicatorDocKey = "failed to get dockey for replicator %s with peerID %s" + errReplicatorCollections = "failed to get collections for replicator" ) var ( @@ -47,3 +52,15 @@ func NewErrPublishingToDockeyTopic(inner error, cid, key string, kv ...errors.KV func NewErrPublishingToSchemaTopic(inner error, cid, key string, kv ...errors.KV) error { return errors.Wrap(fmt.Sprintf(errPublishingToSchemaTopic, cid, key), inner, kv...) 
} + +func NewErrReplicatorExists(collection string, peerID peer.ID, kv ...errors.KV) error { + return errors.New(fmt.Sprintf(errReplicatorExists, collection, peerID), kv...) +} + +func NewErrReplicatorDocKey(inner error, collection string, peerID peer.ID, kv ...errors.KV) error { + return errors.Wrap(fmt.Sprintf(errReplicatorDocKey, collection, peerID), inner, kv...) +} + +func NewErrReplicatorCollections(inner error, kv ...errors.KV) error { + return errors.Wrap(errReplicatorCollections, inner, kv...) +} diff --git a/net/node.go b/net/node.go index 8f916cda16..71e9a10d55 100644 --- a/net/node.go +++ b/net/node.go @@ -19,15 +19,12 @@ package net import ( "context" "fmt" - "os" - "path/filepath" "sync" "sync/atomic" "time" "github.com/ipfs/boxo/ipns" ds "github.com/ipfs/go-datastore" - "github.com/ipfs/go-datastore/namespace" libp2p "github.com/libp2p/go-libp2p" dht "github.com/libp2p/go-libp2p-kad-dht" dualdht "github.com/libp2p/go-libp2p-kad-dht/dual" @@ -39,9 +36,13 @@ import ( "github.com/libp2p/go-libp2p/core/network" "github.com/libp2p/go-libp2p/core/peer" "github.com/libp2p/go-libp2p/core/routing" - "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" + "github.com/multiformats/go-multiaddr" - "github.com/textileio/go-libp2p-pubsub-rpc/finalizer" + "github.com/sourcenetwork/go-libp2p-pubsub-rpc/finalizer" + + // @TODO: https://github.com/sourcenetwork/defradb/issues/1902 + //nolint:staticcheck + "github.com/libp2p/go-libp2p/p2p/host/peerstore/pstoreds" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/logging" @@ -49,6 +50,8 @@ import ( var evtWaitTimeout = 10 * time.Second +var _ client.P2P = (*Node)(nil) + // Node is a networked peer instance of DefraDB. type Node struct { // embed the DB interface into the node @@ -82,19 +85,19 @@ func NewNode( fin := finalizer.NewFinalizer() - // create our peerstore from the underlying defra rootstore - // prefixed with "p2p" - rootstore := db.Root() - pstore := namespace.Wrap(rootstore, ds.NewKey("/db")) - peerstore, err := pstoreds.NewPeerstore(ctx, pstore, pstoreds.DefaultOpts()) + peerstore, err := pstoreds.NewPeerstore(ctx, db.Peerstore(), pstoreds.DefaultOpts()) if err != nil { return nil, fin.Cleanup(err) } fin.Add(peerstore) - hostKey, err := getHostKey(options.DataPath) - if err != nil { - return nil, fin.Cleanup(err) + if options.PrivateKey == nil { + // generate an ephemeral private key + key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) + if err != nil { + return nil, fin.Cleanup(err) + } + options.PrivateKey = key } var ddht *dualdht.DHT @@ -102,7 +105,7 @@ func NewNode( libp2pOpts := []libp2p.Option{ libp2p.ConnectionManager(options.ConnManager), libp2p.DefaultTransports, - libp2p.Identity(hostKey), + libp2p.Identity(options.PrivateKey), libp2p.ListenAddrs(options.ListenAddrs...), libp2p.Peerstore(peerstore), libp2p.Routing(func(h host.Host) (routing.PeerRouting, error) { @@ -113,13 +116,13 @@ func NewNode( // if dsb, isBatching := rootstore.(ds.Batching); isBatching { // store = dsb // } - store := rootstore // Delete this line once we remove batchable datastore support. + store := db.Root() // Delete this line once we remove batchable datastore support. ddht, err = newDHT(ctx, h, store) return ddht, err }), } - if options.EnableRelay { - libp2pOpts = append(libp2pOpts, libp2p.EnableRelay()) + if !options.EnableRelay { + libp2pOpts = append(libp2pOpts, libp2p.DisableRelay()) } h, err := libp2p.New(libp2pOpts...) 
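Reviewer note: with the node.go changes above, the host identity is supplied through the new WithPrivateKey option instead of a key file under the removed DataPath, and when no key is given NewNode falls back to a fresh ephemeral Ed25519 key, so a restarted node gets a new peer ID unless the caller persists one. A hedged usage sketch (newNodeWithStableIdentity is a hypothetical helper, and the db parameter type is assumed to be client.DB based on how the tests in this diff construct it):

package net

import (
	"context"

	"github.com/libp2p/go-libp2p/core/crypto"

	"github.com/sourcenetwork/defradb/client"
)

// newNodeWithStableIdentity shows the new option: callers that want a
// stable peer ID generate (or load) a key and hand it to NewNode;
// omitting WithPrivateKey yields an ephemeral Ed25519 key via the
// fallback added above.
func newNodeWithStableIdentity(ctx context.Context, db client.DB) (*Node, error) {
	key, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
	if err != nil {
		return nil, err
	}
	// In real use the key would be persisted (crypto.MarshalPrivateKey)
	// and reloaded on startup so the peer ID survives restarts.
	return NewNode(ctx, db, WithPrivateKey(key))
}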
@@ -154,7 +157,6 @@ func NewNode( h, ddht, ps, - options.TCPAddr, options.GRPCServerOptions, options.GRPCDialOptions, ) @@ -186,8 +188,8 @@ func NewNode( return n, nil } -// Boostrap connects to the given peers. -func (n *Node) Boostrap(addrs []peer.AddrInfo) { +// Bootstrap connects to the given peers. +func (n *Node) Bootstrap(addrs []peer.AddrInfo) { var connected uint64 var wg sync.WaitGroup @@ -218,14 +220,19 @@ func (n *Node) Boostrap(addrs []peer.AddrInfo) { } } -// ListenAddrs returns the Multiaddr list of the hosts' listening addresses. +func (n *Node) PeerID() peer.ID { + return n.host.ID() +} + func (n *Node) ListenAddrs() []multiaddr.Multiaddr { - return n.host.Addrs() + return n.host.Network().ListenAddresses() } -// PeerID returns the node's peer ID. -func (n *Node) PeerID() peer.ID { - return n.host.ID() +func (n *Node) PeerInfo() peer.AddrInfo { + return peer.AddrInfo{ + ID: n.host.ID(), + Addrs: n.host.Network().ListenAddresses(), + } } // subscribeToPeerConnectionEvents subscribes the node to the event bus for a peer connection change. @@ -377,46 +384,6 @@ func (n *Node) WaitForPushLogFromPeerEvent(id peer.ID) error { } } -// replace with proper keystore -func getHostKey(keypath string) (crypto.PrivKey, error) { - // If a local datastore is used, the key is written to a file - pth := filepath.Join(keypath, "key") - _, err := os.Stat(pth) - if os.IsNotExist(err) { - key, bytes, err := newHostKey() - if err != nil { - return nil, err - } - if err := os.MkdirAll(keypath, os.ModePerm); err != nil { - return nil, err - } - if err = os.WriteFile(pth, bytes, 0400); err != nil { - return nil, err - } - return key, nil - } else if err != nil { - return nil, err - } else { - bytes, err := os.ReadFile(pth) - if err != nil { - return nil, err - } - return crypto.UnmarshalPrivateKey(bytes) - } -} - -func newHostKey() (crypto.PrivKey, []byte, error) { - priv, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) - if err != nil { - return nil, nil, err - } - key, err := crypto.MarshalPrivateKey(priv) - if err != nil { - return nil, nil, err - } - return priv, key, nil -} - func newDHT(ctx context.Context, h host.Host, dsb ds.Batching) (*dualdht.DHT, error) { dhtOpts := []dualdht.Option{ dualdht.DHTOption(dht.NamespacedValidator("pk", record.PublicKeyValidator{})), @@ -432,7 +399,12 @@ func newDHT(ctx context.Context, h host.Host, dsb ds.Batching) (*dualdht.DHT, er } // Close closes the node and all its services. 
-func (n Node) Close() error { - n.cancel() - return n.Peer.Close() +func (n Node) Close() { + if n.cancel != nil { + n.cancel() + } + if n.Peer != nil { + n.Peer.Close() + } + n.DB.Close() } diff --git a/net/node_test.go b/net/node_test.go index c622be18ca..15ccc7d065 100644 --- a/net/node_test.go +++ b/net/node_test.go @@ -11,15 +11,14 @@ package net import ( - "bytes" "context" "testing" "time" - badger "github.com/dgraph-io/badger/v4" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/peer" ma "github.com/multiformats/go-multiaddr" + badger "github.com/sourcenetwork/badger/v4" "github.com/stretchr/testify/require" "github.com/sourcenetwork/defradb/client" @@ -27,7 +26,6 @@ import ( badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/db" - "github.com/sourcenetwork/defradb/logging" netutils "github.com/sourcenetwork/defradb/net/utils" ) @@ -55,38 +53,21 @@ func TestNewNode_WithEnableRelay_NoError(t *testing.T) { context.Background(), db, WithEnableRelay(true), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) } -func TestNewNode_WithInvalidListenTCPAddrString_ParseError(t *testing.T) { - ctx := context.Background() - store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) - require.NoError(t, err) - _, err = NewNode( - context.Background(), - db, - WithListenTCPAddrString("/ip4/碎片整理"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), - ) - require.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") -} - func TestNewNode_WithDBClosed_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) + db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) require.NoError(t, err) - db.Close(ctx) + db.Close() + _, err = NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.ErrorContains(t, err, "datastore closed") } @@ -100,8 +81,6 @@ func TestNewNode_NoPubSub_NoError(t *testing.T) { context.Background(), db, WithPubSub(false), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) require.Nil(t, n.ps) @@ -117,8 +96,6 @@ func TestNewNode_WithPubSub_NoError(t *testing.T) { ctx, db, WithPubSub(true), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -126,20 +103,6 @@ func TestNewNode_WithPubSub_NoError(t *testing.T) { require.NotNil(t, n.ps) } -func TestNewNode_WithPubSub_FailsWithoutWithDataPath(t *testing.T) { - ctx := context.Background() - store := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, store, db.WithUpdateEvents()) - require.NoError(t, err) - - _, err = NewNode( - ctx, - db, - WithPubSub(true), - ) - require.EqualError(t, err, "1 error occurred:\n\t* mkdir : no such file or directory\n\n") -} - func TestNodeClose_NoError(t *testing.T) { ctx := context.Background() store := memory.NewDatastore(ctx) @@ -148,12 +111,9 @@ func TestNodeClose_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current 
implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) - err = n.Close() - require.NoError(t, err) + n.Close() } func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { @@ -166,11 +126,9 @@ func TestNewNode_BootstrapWithNoPeer_NoError(t *testing.T) { ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) - n1.Boostrap([]peer.AddrInfo{}) + n1.Bootstrap([]peer.AddrInfo{}) } func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { @@ -183,23 +141,19 @@ func TestNewNode_BootstrapWithOnePeer_NoError(t *testing.T) { ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) addrs, err := netutils.ParsePeers([]string{n1.host.Addrs()[0].String() + "/p2p/" + n1.PeerID().String()}) if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) } func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing.T) { @@ -212,16 +166,12 @@ func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) n2, err := NewNode( ctx, db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) addrs, err := netutils.ParsePeers([]string{ @@ -231,21 +181,7 @@ func TestNewNode_BootstrapWithOneValidPeerAndManyInvalidPeers_NoError(t *testing "/ip4/0.0.0.0/tcp/1236/p2p/" + "12D3KooWC8YY6Tx3uAeHsdBmoy7PJPwqXAHE4HkCZ5veankKWci4", }) require.NoError(t, err) - n2.Boostrap(addrs) -} - -func mergeOptions(nodeOpts ...NodeOpt) (Options, error) { - var options Options - var nodeOpt NodeOpt - for _, opt := range append(nodeOpts, nodeOpt) { - if opt == nil { - continue - } - if err := opt(&options); err != nil { - return options, err - } - } - return options, nil + n2.Bootstrap(addrs) } func TestListenAddrs_WithListenP2PAddrStrings_NoError(t *testing.T) { @@ -257,32 +193,17 @@ func TestListenAddrs_WithListenP2PAddrStrings_NoError(t *testing.T) { context.Background(), db, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) require.Contains(t, n.ListenAddrs()[0].String(), "/tcp/") } -func TestWithListenTCPAddrString_WithInvalidListenTCPAddrString_ParseError(t *testing.T) { - opt := WithListenTCPAddrString("/ip4/碎片整理") - options, err := mergeOptions(opt) - require.EqualError(t, err, "failed to parse multiaddr \"/ip4/碎片整理\": invalid value \"碎片整理\" for protocol ip4: failed to parse ip4 addr: 碎片整理") - require.Equal(t, Options{}, options) -} - func TestNodeConfig_NoError(t *testing.T) { - tempDir := t.TempDir() - cfg := config.DefaultConfig() cfg.Net.P2PAddress = "/ip4/0.0.0.0/tcp/9179" - cfg.Net.TCPAddress = "/ip4/0.0.0.0/tcp/9169" - cfg.Net.RPCTimeout = "100s" - 
cfg.Net.RPCMaxConnectionIdle = "111s" cfg.Net.RelayEnabled = true cfg.Net.PubSubEnabled = true - cfg.Datastore.Badger.Path = tempDir configOpt := WithConfig(cfg) options, err := NewMergedOptions(configOpt) @@ -291,14 +212,10 @@ func TestNodeConfig_NoError(t *testing.T) { // confirming it provides the same config as a manually constructed node.Options p2pAddr, err := ma.NewMultiaddr(cfg.Net.P2PAddress) require.NoError(t, err) - tcpAddr, err := ma.NewMultiaddr(cfg.Net.TCPAddress) - require.NoError(t, err) connManager, err := NewConnManager(100, 400, time.Second*20) require.NoError(t, err) expectedOptions := Options{ ListenAddrs: []ma.Multiaddr{p2pAddr}, - TCPAddr: tcpAddr, - DataPath: tempDir, EnablePubSub: true, EnableRelay: true, ConnManager: connManager, @@ -307,70 +224,15 @@ func TestNodeConfig_NoError(t *testing.T) { for k, v := range options.ListenAddrs { require.Equal(t, expectedOptions.ListenAddrs[k], v) } - require.Equal(t, expectedOptions.TCPAddr.String(), options.TCPAddr.String()) - require.Equal(t, expectedOptions.DataPath, options.DataPath) require.Equal(t, expectedOptions.EnablePubSub, options.EnablePubSub) require.Equal(t, expectedOptions.EnableRelay, options.EnableRelay) } -func TestSubscribeToPeerConnectionEvents_SubscriptionError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), - ) - require.NoError(t, err) - - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - - n.Peer.host = &mockHost{n.Peer.host} - - n.subscribeToPeerConnectionEvents() - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - require.Equal(t, "failed to subscribe to peer connectedness changed event: mock error", logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") -} - -func TestPeerConnectionEventEmitter_SingleEvent_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), - ) - require.NoError(t, err) - - emitter, err := n.host.EventBus().Emitter(new(event.EvtPeerConnectednessChanged)) - require.NoError(t, err) - - err = emitter.Emit(event.EvtPeerConnectednessChanged{}) - require.NoError(t, err) -} - func TestPeerConnectionEventEmitter_MultiEvent_NoError(t *testing.T) { db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -389,50 +251,12 @@ func TestSubscribeToPubSubEvents_SubscriptionError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - n.Peer.host = &mockHost{n.Peer.host} n.subscribeToPubSubEvents() - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - require.Equal(t, "failed to subscribe to pubsub event: mock error", 
logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") -} - -func TestPubSubEventEmitter_SingleEvent_NoError(t *testing.T) { - db := FixtureNewMemoryDBWithBroadcaster(t) - n, err := NewNode( - context.Background(), - db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), - ) - require.NoError(t, err) - - emitter, err := n.host.EventBus().Emitter(new(EvtPubSub)) - require.NoError(t, err) - - err = emitter.Emit(EvtPubSub{}) - require.NoError(t, err) } func TestPubSubEventEmitter_MultiEvent_NoError(t *testing.T) { @@ -440,8 +264,6 @@ func TestPubSubEventEmitter_MultiEvent_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -460,33 +282,12 @@ func TestSubscribeToPushLogEvents_SubscriptionError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - n.Peer.host = &mockHost{n.Peer.host} n.subscribeToPushLogEvents() - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - require.Equal(t, "failed to subscribe to push log event: mock error", logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") } func TestPushLogEventEmitter_SingleEvent_NoError(t *testing.T) { @@ -494,8 +295,6 @@ func TestPushLogEventEmitter_SingleEvent_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -511,8 +310,6 @@ func TestPushLogEventEmitter_MultiEvent_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -531,8 +328,6 @@ func TestWaitForPeerConnectionEvent_WithSamePeer_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -557,8 +352,6 @@ func TestWaitForPeerConnectionEvent_WithDifferentPeer_TimeoutError(t *testing.T) n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -577,8 +370,6 @@ func TestWaitForPeerConnectionEvent_WithDifferentPeerAndContextClosed_NoError(t n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -599,8 +390,6 @@ func TestWaitForPubSubEvent_WithSamePeer_NoError(t *testing.T) { n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -625,8 +414,6 @@ func TestWaitForPubSubEvent_WithDifferentPeer_TimeoutError(t *testing.T) { n, err := NewNode( context.Background(), db, - // 
WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -645,8 +432,6 @@ func TestWaitForPubSubEvent_WithDifferentPeerAndContextClosed_NoError(t *testing n, err := NewNode( context.Background(), db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -663,12 +448,11 @@ func TestWaitForPubSubEvent_WithDifferentPeerAndContextClosed_NoError(t *testing } func TestWaitForPushLogByPeerEvent_WithSamePeer_NoError(t *testing.T) { + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -689,12 +473,11 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) defer func() { evtWaitTimeout = 10 * time.Second }() + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -709,12 +492,11 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T) } func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -731,12 +513,11 @@ func TestWaitForPushLogByPeerEvent_WithDifferentPeerAndContextClosed_NoError(t * } func TestWaitForPushLogFromPeerEvent_WithSamePeer_NoError(t *testing.T) { + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -757,12 +538,11 @@ func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T defer func() { evtWaitTimeout = 10 * time.Second }() + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -777,12 +557,11 @@ func TestWaitForPushLogFromPeerEvent_WithDifferentPeer_TimeoutError(t *testing.T } func TestWaitForPushLogFromPeerEvent_WithDifferentPeerAndContextClosed_NoError(t *testing.T) { + ctx := context.Background() db := FixtureNewMemoryDBWithBroadcaster(t) n, err := NewNode( - context.Background(), + ctx, db, - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) diff --git a/net/pb/Makefile b/net/pb/Makefile index 62eef77354..233665c334 100644 --- a/net/pb/Makefile +++ b/net/pb/Makefile @@ -3,6 +3,11 @@ GO = $(PB:.proto=.pb.go) all: $(GO) +deps: + go install google.golang.org/protobuf/cmd/protoc-gen-go@latest + go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@latest + go install github.com/planetscale/vtprotobuf/cmd/protoc-gen-go-vtproto@latest + %.pb.go: %.proto 
protoc \ --go_out=. --plugin protoc-gen-go="${GOBIN}/protoc-gen-go" \ diff --git a/net/pb/net.pb.go b/net/pb/net.pb.go index 70daae73a7..92eaafa5be 100644 --- a/net/pb/net.pb.go +++ b/net/pb/net.pb.go @@ -1,7 +1,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.30.0 -// protoc v3.21.9 +// protoc-gen-go v1.31.0 +// protoc v3.12.4 // source: net.proto package net_pb @@ -467,727 +467,33 @@ func (*GetHeadLogReply) Descriptor() ([]byte, []int) { return file_net_proto_rawDescGZIP(), []int{10} } -type SetReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` - Addr []byte `protobuf:"bytes,2,opt,name=addr,proto3" json:"addr,omitempty"` -} - -func (x *SetReplicatorRequest) Reset() { - *x = SetReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetReplicatorRequest) ProtoMessage() {} - -func (x *SetReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[11] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetReplicatorRequest.ProtoReflect.Descriptor instead. -func (*SetReplicatorRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{11} -} - -func (x *SetReplicatorRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -func (x *SetReplicatorRequest) GetAddr() []byte { - if x != nil { - return x.Addr - } - return nil -} - -type SetReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *SetReplicatorReply) Reset() { - *x = SetReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *SetReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetReplicatorReply) ProtoMessage() {} - -func (x *SetReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[12] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetReplicatorReply.ProtoReflect.Descriptor instead. 
-func (*SetReplicatorReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{12} -} - -func (x *SetReplicatorReply) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type DeleteReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` - Collections []string `protobuf:"bytes,2,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *DeleteReplicatorRequest) Reset() { - *x = DeleteReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteReplicatorRequest) ProtoMessage() {} - -func (x *DeleteReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[13] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteReplicatorRequest.ProtoReflect.Descriptor instead. -func (*DeleteReplicatorRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{13} -} - -func (x *DeleteReplicatorRequest) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -func (x *DeleteReplicatorRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type DeleteReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - PeerID []byte `protobuf:"bytes,1,opt,name=peerID,proto3" json:"peerID,omitempty"` -} - -func (x *DeleteReplicatorReply) Reset() { - *x = DeleteReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *DeleteReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteReplicatorReply) ProtoMessage() {} - -func (x *DeleteReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[14] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteReplicatorReply.ProtoReflect.Descriptor instead. 
-func (*DeleteReplicatorReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{14} -} - -func (x *DeleteReplicatorReply) GetPeerID() []byte { - if x != nil { - return x.PeerID - } - return nil -} - -type GetAllReplicatorRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetAllReplicatorRequest) Reset() { - *x = GetAllReplicatorRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorRequest) ProtoMessage() {} - -func (x *GetAllReplicatorRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[15] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorRequest.ProtoReflect.Descriptor instead. -func (*GetAllReplicatorRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{15} -} - -type GetAllReplicatorReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Replicators []*GetAllReplicatorReply_Replicators `protobuf:"bytes,1,rep,name=replicators,proto3" json:"replicators,omitempty"` -} - -func (x *GetAllReplicatorReply) Reset() { - *x = GetAllReplicatorReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllReplicatorReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllReplicatorReply) ProtoMessage() {} - -func (x *GetAllReplicatorReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[16] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllReplicatorReply.ProtoReflect.Descriptor instead. 
-func (*GetAllReplicatorReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{16} -} - -func (x *GetAllReplicatorReply) GetReplicators() []*GetAllReplicatorReply_Replicators { - if x != nil { - return x.Replicators - } - return nil -} - -type AddP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *AddP2PCollectionsRequest) Reset() { - *x = AddP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddP2PCollectionsRequest) ProtoMessage() {} - -func (x *AddP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[17] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddP2PCollectionsRequest.ProtoReflect.Descriptor instead. -func (*AddP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{17} -} - -func (x *AddP2PCollectionsRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type AddP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` -} - -func (x *AddP2PCollectionsReply) Reset() { - *x = AddP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *AddP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*AddP2PCollectionsReply) ProtoMessage() {} - -func (x *AddP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[18] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use AddP2PCollectionsReply.ProtoReflect.Descriptor instead. 
-func (*AddP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{18} -} - -func (x *AddP2PCollectionsReply) GetErr() string { - if x != nil { - return x.Err - } - return "" -} - -type RemoveP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []string `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *RemoveP2PCollectionsRequest) Reset() { - *x = RemoveP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveP2PCollectionsRequest) ProtoMessage() {} - -func (x *RemoveP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[19] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveP2PCollectionsRequest.ProtoReflect.Descriptor instead. -func (*RemoveP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{19} -} - -func (x *RemoveP2PCollectionsRequest) GetCollections() []string { - if x != nil { - return x.Collections - } - return nil -} - -type RemoveP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty"` -} - -func (x *RemoveP2PCollectionsReply) Reset() { - *x = RemoveP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *RemoveP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*RemoveP2PCollectionsReply) ProtoMessage() {} - -func (x *RemoveP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[20] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use RemoveP2PCollectionsReply.ProtoReflect.Descriptor instead. 
-func (*RemoveP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{20} -} - -func (x *RemoveP2PCollectionsReply) GetErr() string { - if x != nil { - return x.Err - } - return "" -} - -type GetAllP2PCollectionsRequest struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields -} - -func (x *GetAllP2PCollectionsRequest) Reset() { - *x = GetAllP2PCollectionsRequest{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsRequest) ProtoMessage() {} - -func (x *GetAllP2PCollectionsRequest) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[21] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsRequest.ProtoReflect.Descriptor instead. -func (*GetAllP2PCollectionsRequest) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{21} -} - -type GetAllP2PCollectionsReply struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Collections []*GetAllP2PCollectionsReply_Collection `protobuf:"bytes,1,rep,name=collections,proto3" json:"collections,omitempty"` -} - -func (x *GetAllP2PCollectionsReply) Reset() { - *x = GetAllP2PCollectionsReply{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsReply) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsReply) ProtoMessage() {} - -func (x *GetAllP2PCollectionsReply) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[22] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsReply.ProtoReflect.Descriptor instead. -func (*GetAllP2PCollectionsReply) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{22} -} - -func (x *GetAllP2PCollectionsReply) GetCollections() []*GetAllP2PCollectionsReply_Collection { - if x != nil { - return x.Collections - } - return nil -} - -// Record is a thread record containing link data. -type Document_Log struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // block is the top-level node's raw data as an ipld.Block. 
- Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` -} - -func (x *Document_Log) Reset() { - *x = Document_Log{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *Document_Log) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*Document_Log) ProtoMessage() {} - -func (x *Document_Log) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[23] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use Document_Log.ProtoReflect.Descriptor instead. -func (*Document_Log) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{0, 0} -} - -func (x *Document_Log) GetBlock() []byte { - if x != nil { - return x.Block - } - return nil -} - -type PushLogRequest_Body struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - // docKey is the DocKey of the document that is affected by the log. - DocKey []byte `protobuf:"bytes,1,opt,name=docKey,proto3" json:"docKey,omitempty"` - // cid is the CID of the composite of the document. - Cid []byte `protobuf:"bytes,2,opt,name=cid,proto3" json:"cid,omitempty"` - // schemaID is the SchemaID of the collection that the document resides in. - SchemaID []byte `protobuf:"bytes,3,opt,name=schemaID,proto3" json:"schemaID,omitempty"` - // creator is the PeerID of the peer that created the log. - Creator string `protobuf:"bytes,4,opt,name=creator,proto3" json:"creator,omitempty"` - // log hold the block that represent version of the document. - Log *Document_Log `protobuf:"bytes,6,opt,name=log,proto3" json:"log,omitempty"` -} - -func (x *PushLogRequest_Body) Reset() { - *x = PushLogRequest_Body{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *PushLogRequest_Body) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*PushLogRequest_Body) ProtoMessage() {} - -func (x *PushLogRequest_Body) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[24] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use PushLogRequest_Body.ProtoReflect.Descriptor instead. -func (*PushLogRequest_Body) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{7, 0} -} - -func (x *PushLogRequest_Body) GetDocKey() []byte { - if x != nil { - return x.DocKey - } - return nil -} - -func (x *PushLogRequest_Body) GetCid() []byte { - if x != nil { - return x.Cid - } - return nil -} - -func (x *PushLogRequest_Body) GetSchemaID() []byte { - if x != nil { - return x.SchemaID - } - return nil -} - -func (x *PushLogRequest_Body) GetCreator() string { - if x != nil { - return x.Creator - } - return "" -} - -func (x *PushLogRequest_Body) GetLog() *Document_Log { - if x != nil { - return x.Log - } - return nil -} - -type GetAllReplicatorReply_Replicators struct { +// Record is a thread record containing link data. 
+type Document_Log struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Info *GetAllReplicatorReply_Replicators_Info `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` - Schemas []string `protobuf:"bytes,2,rep,name=schemas,proto3" json:"schemas,omitempty"` + // block is the top-level node's raw data as an ipld.Block. + Block []byte `protobuf:"bytes,1,opt,name=block,proto3" json:"block,omitempty"` } -func (x *GetAllReplicatorReply_Replicators) Reset() { - *x = GetAllReplicatorReply_Replicators{} +func (x *Document_Log) Reset() { + *x = Document_Log{} if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[25] + mi := &file_net_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetAllReplicatorReply_Replicators) String() string { +func (x *Document_Log) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetAllReplicatorReply_Replicators) ProtoMessage() {} +func (*Document_Log) ProtoMessage() {} -func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[25] +func (x *Document_Log) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1198,51 +504,52 @@ func (x *GetAllReplicatorReply_Replicators) ProtoReflect() protoreflect.Message return mi.MessageOf(x) } -// Deprecated: Use GetAllReplicatorReply_Replicators.ProtoReflect.Descriptor instead. -func (*GetAllReplicatorReply_Replicators) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{16, 0} -} - -func (x *GetAllReplicatorReply_Replicators) GetInfo() *GetAllReplicatorReply_Replicators_Info { - if x != nil { - return x.Info - } - return nil +// Deprecated: Use Document_Log.ProtoReflect.Descriptor instead. +func (*Document_Log) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{0, 0} } -func (x *GetAllReplicatorReply_Replicators) GetSchemas() []string { +func (x *Document_Log) GetBlock() []byte { if x != nil { - return x.Schemas + return x.Block } return nil } -type GetAllReplicatorReply_Replicators_Info struct { +type PushLogRequest_Body struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Addrs []byte `protobuf:"bytes,2,opt,name=addrs,proto3" json:"addrs,omitempty"` + // docKey is the DocKey of the document that is affected by the log. + DocKey []byte `protobuf:"bytes,1,opt,name=docKey,proto3" json:"docKey,omitempty"` + // cid is the CID of the composite of the document. + Cid []byte `protobuf:"bytes,2,opt,name=cid,proto3" json:"cid,omitempty"` + // schemaRoot is the SchemaRoot of the collection that the document resides in. + SchemaRoot []byte `protobuf:"bytes,3,opt,name=schemaRoot,proto3" json:"schemaRoot,omitempty"` + // creator is the PeerID of the peer that created the log. + Creator string `protobuf:"bytes,4,opt,name=creator,proto3" json:"creator,omitempty"` + // log hold the block that represent version of the document. 
+ Log *Document_Log `protobuf:"bytes,6,opt,name=log,proto3" json:"log,omitempty"` } -func (x *GetAllReplicatorReply_Replicators_Info) Reset() { - *x = GetAllReplicatorReply_Replicators_Info{} +func (x *PushLogRequest_Body) Reset() { + *x = PushLogRequest_Body{} if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[26] + mi := &file_net_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *GetAllReplicatorReply_Replicators_Info) String() string { +func (x *PushLogRequest_Body) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetAllReplicatorReply_Replicators_Info) ProtoMessage() {} +func (*PushLogRequest_Body) ProtoMessage() {} -func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[26] +func (x *PushLogRequest_Body) ProtoReflect() protoreflect.Message { + mi := &file_net_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1253,78 +560,44 @@ func (x *GetAllReplicatorReply_Replicators_Info) ProtoReflect() protoreflect.Mes return mi.MessageOf(x) } -// Deprecated: Use GetAllReplicatorReply_Replicators_Info.ProtoReflect.Descriptor instead. -func (*GetAllReplicatorReply_Replicators_Info) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{16, 0, 0} +// Deprecated: Use PushLogRequest_Body.ProtoReflect.Descriptor instead. +func (*PushLogRequest_Body) Descriptor() ([]byte, []int) { + return file_net_proto_rawDescGZIP(), []int{7, 0} } -func (x *GetAllReplicatorReply_Replicators_Info) GetId() []byte { +func (x *PushLogRequest_Body) GetDocKey() []byte { if x != nil { - return x.Id + return x.DocKey } return nil } -func (x *GetAllReplicatorReply_Replicators_Info) GetAddrs() []byte { +func (x *PushLogRequest_Body) GetCid() []byte { if x != nil { - return x.Addrs + return x.Cid } return nil } -type GetAllP2PCollectionsReply_Collection struct { - state protoimpl.MessageState - sizeCache protoimpl.SizeCache - unknownFields protoimpl.UnknownFields - - Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` -} - -func (x *GetAllP2PCollectionsReply_Collection) Reset() { - *x = GetAllP2PCollectionsReply_Collection{} - if protoimpl.UnsafeEnabled { - mi := &file_net_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) - } -} - -func (x *GetAllP2PCollectionsReply_Collection) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetAllP2PCollectionsReply_Collection) ProtoMessage() {} - -func (x *GetAllP2PCollectionsReply_Collection) ProtoReflect() protoreflect.Message { - mi := &file_net_proto_msgTypes[27] - if protoimpl.UnsafeEnabled && x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +func (x *PushLogRequest_Body) GetSchemaRoot() []byte { + if x != nil { + return x.SchemaRoot } - return mi.MessageOf(x) -} - -// Deprecated: Use GetAllP2PCollectionsReply_Collection.ProtoReflect.Descriptor instead. 
-func (*GetAllP2PCollectionsReply_Collection) Descriptor() ([]byte, []int) { - return file_net_proto_rawDescGZIP(), []int{22, 0} + return nil } -func (x *GetAllP2PCollectionsReply_Collection) GetId() string { +func (x *PushLogRequest_Body) GetCreator() string { if x != nil { - return x.Id + return x.Creator } return "" } -func (x *GetAllP2PCollectionsReply_Collection) GetName() string { +func (x *PushLogRequest_Body) GetLog() *Document_Log { if x != nil { - return x.Name + return x.Log } - return "" + return nil } var File_net_proto protoreflect.FileDescriptor @@ -1343,141 +616,47 @@ var file_net_proto_rawDesc = []byte{ 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x13, 0x0a, 0x11, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x0f, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, - 0x0d, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0xd2, + 0x0d, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0xd6, 0x01, 0x0a, 0x0e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2f, 0x0a, 0x04, 0x62, 0x6f, 0x64, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x42, 0x6f, 0x64, 0x79, 0x52, 0x04, 0x62, 0x6f, - 0x64, 0x79, 0x1a, 0x8e, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x64, + 0x64, 0x79, 0x1a, 0x92, 0x01, 0x0a, 0x04, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x63, 0x4b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x64, 0x6f, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x63, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x52, 0x03, 0x63, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, - 0x44, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x49, - 0x44, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x26, 0x0a, 0x03, 0x6c, - 0x6f, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, - 0x62, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x6f, 0x67, 0x52, 0x03, - 0x6c, 0x6f, 0x67, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, - 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x0e, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, - 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x11, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x48, - 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x4c, 0x0a, 0x14, 0x53, - 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x61, 0x64, 0x64, 0x72, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0c, 0x52, 0x04, 0x61, 0x64, 0x64, 0x72, 0x22, 0x2c, 0x0a, 0x12, 0x53, 0x65, 0x74, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, - 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, - 
0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, 0x53, 0x0a, 0x17, 0x44, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, - 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, - 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2f, 0x0a, 0x15, - 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x65, 0x65, 0x72, 0x49, 0x44, 0x22, 0x19, 0x0a, - 0x17, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x80, 0x02, 0x0a, 0x15, 0x47, 0x65, 0x74, - 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x4b, 0x0a, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, - 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, - 0x72, 0x73, 0x52, 0x0b, 0x72, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x1a, - 0x99, 0x01, 0x0a, 0x0b, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, - 0x42, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2e, 0x2e, - 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x52, 0x65, 0x70, - 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x2e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, - 0x6e, 0x66, 0x6f, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x18, 0x02, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x73, 0x1a, 0x2c, 0x0a, - 0x04, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0c, 0x52, 0x02, 0x69, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x18, 0x02, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x61, 0x64, 0x64, 0x72, 0x73, 0x22, 0x3c, 0x0a, 0x18, 0x41, - 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, - 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2a, 0x0a, 0x16, 0x41, 0x64, 0x64, - 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x3f, 0x0a, 0x1b, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, - 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x12, 0x20, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x18, 
0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x2d, 0x0a, 0x19, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, - 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, - 0x70, 0x6c, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x1d, 0x0a, 0x1b, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, - 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x22, 0x9d, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, - 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x12, 0x4e, 0x0a, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, - 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x2e, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0b, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x1a, 0x30, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, - 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, - 0x6e, 0x61, 0x6d, 0x65, 0x32, 0xd1, 0x02, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, - 0x12, 0x45, 0x0a, 0x0b, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, - 0x1a, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, - 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x6e, 0x65, - 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, - 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, 0x44, - 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x1b, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, - 0x2e, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, - 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, - 0x00, 0x12, 0x36, 0x0a, 0x06, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x12, 0x15, 0x2e, 0x6e, 0x65, - 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x13, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, - 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x07, 0x50, 0x75, 0x73, - 0x68, 0x4c, 0x6f, 0x67, 0x12, 0x16, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, - 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x6e, - 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x22, 0x00, 0x12, 0x42, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, - 0x6f, 0x67, 0x12, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, - 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, + 0x52, 0x03, 0x63, 0x69, 0x64, 0x12, 0x1e, 0x0a, 
0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x52, + 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x72, 0x65, 0x61, 0x74, 0x6f, 0x72, 0x12, + 0x26, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x6e, + 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x44, 0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x4c, + 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x13, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x48, 0x65, + 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x0e, 0x0a, 0x0c, + 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x11, 0x0a, 0x0f, + 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x32, + 0xd1, 0x02, 0x0a, 0x07, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x45, 0x0a, 0x0b, 0x47, + 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x12, 0x1a, 0x2e, 0x6e, 0x65, 0x74, + 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x18, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x74, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, + 0x22, 0x00, 0x12, 0x48, 0x0a, 0x0c, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, + 0x70, 0x68, 0x12, 0x1b, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, + 0x44, 0x6f, 0x63, 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x44, 0x6f, 0x63, + 0x47, 0x72, 0x61, 0x70, 0x68, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x36, 0x0a, 0x06, + 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x12, 0x15, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, + 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x13, 0x2e, + 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, + 0x6c, 0x79, 0x22, 0x00, 0x12, 0x39, 0x0a, 0x07, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x12, + 0x16, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, + 0x2e, 0x50, 0x75, 0x73, 0x68, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, + 0x42, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x12, 0x19, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, - 0x67, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x32, 0xa3, 0x04, 0x0a, 0x0a, 0x43, 0x6f, 0x6c, - 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x4b, 0x0a, 0x0d, 0x53, 0x65, 0x74, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1c, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, - 0x62, 0x2e, 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1a, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, - 0x53, 0x65, 0x74, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, - 0x6c, 0x79, 0x22, 0x00, 0x12, 0x54, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x12, 0x1f, 0x2e, 0x6e, 
0x65, 0x74, 0x2e, 0x70, - 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x74, 0x2e, - 0x70, 0x62, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, - 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x55, 0x0a, 0x11, 0x47, 0x65, - 0x74, 0x41, 0x6c, 0x6c, 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, - 0x1f, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x52, - 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x1d, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, - 0x52, 0x65, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x6f, 0x72, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, - 0x00, 0x12, 0x57, 0x0a, 0x11, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x20, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, - 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, - 0x62, 0x2e, 0x41, 0x64, 0x64, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, 0x52, 0x65, - 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x52, 0x65, 0x6d, 0x6f, - 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, - 0x2e, 0x52, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x12, 0x60, 0x0a, 0x14, - 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, 0x62, 0x2e, 0x47, 0x65, - 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, - 0x6e, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6e, 0x65, 0x74, 0x2e, - 0x70, 0x62, 0x2e, 0x47, 0x65, 0x74, 0x41, 0x6c, 0x6c, 0x50, 0x32, 0x50, 0x43, 0x6f, 0x6c, 0x6c, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x65, 0x70, 0x6c, 0x79, 0x22, 0x00, 0x42, 0x0a, - 0x5a, 0x08, 0x2f, 0x3b, 0x6e, 0x65, 0x74, 0x5f, 0x70, 0x62, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x33, + 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x6e, 0x65, 0x74, 0x2e, 0x70, + 0x62, 0x2e, 0x47, 0x65, 0x74, 0x48, 0x65, 0x61, 0x64, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x70, 0x6c, + 0x79, 0x22, 0x00, 0x42, 0x0a, 0x5a, 0x08, 0x2f, 0x3b, 0x6e, 0x65, 0x74, 0x5f, 0x70, 0x62, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1492,70 +671,40 @@ func file_net_proto_rawDescGZIP() []byte { return file_net_proto_rawDescData } -var file_net_proto_msgTypes = make([]protoimpl.MessageInfo, 28) +var file_net_proto_msgTypes = make([]protoimpl.MessageInfo, 13) var file_net_proto_goTypes = []interface{}{ - (*Document)(nil), // 0: net.pb.Document - (*GetDocGraphRequest)(nil), // 1: net.pb.GetDocGraphRequest - (*GetDocGraphReply)(nil), // 2: 
net.pb.GetDocGraphReply - (*PushDocGraphRequest)(nil), // 3: net.pb.PushDocGraphRequest - (*PushDocGraphReply)(nil), // 4: net.pb.PushDocGraphReply - (*GetLogRequest)(nil), // 5: net.pb.GetLogRequest - (*GetLogReply)(nil), // 6: net.pb.GetLogReply - (*PushLogRequest)(nil), // 7: net.pb.PushLogRequest - (*GetHeadLogRequest)(nil), // 8: net.pb.GetHeadLogRequest - (*PushLogReply)(nil), // 9: net.pb.PushLogReply - (*GetHeadLogReply)(nil), // 10: net.pb.GetHeadLogReply - (*SetReplicatorRequest)(nil), // 11: net.pb.SetReplicatorRequest - (*SetReplicatorReply)(nil), // 12: net.pb.SetReplicatorReply - (*DeleteReplicatorRequest)(nil), // 13: net.pb.DeleteReplicatorRequest - (*DeleteReplicatorReply)(nil), // 14: net.pb.DeleteReplicatorReply - (*GetAllReplicatorRequest)(nil), // 15: net.pb.GetAllReplicatorRequest - (*GetAllReplicatorReply)(nil), // 16: net.pb.GetAllReplicatorReply - (*AddP2PCollectionsRequest)(nil), // 17: net.pb.AddP2PCollectionsRequest - (*AddP2PCollectionsReply)(nil), // 18: net.pb.AddP2PCollectionsReply - (*RemoveP2PCollectionsRequest)(nil), // 19: net.pb.RemoveP2PCollectionsRequest - (*RemoveP2PCollectionsReply)(nil), // 20: net.pb.RemoveP2PCollectionsReply - (*GetAllP2PCollectionsRequest)(nil), // 21: net.pb.GetAllP2PCollectionsRequest - (*GetAllP2PCollectionsReply)(nil), // 22: net.pb.GetAllP2PCollectionsReply - (*Document_Log)(nil), // 23: net.pb.Document.Log - (*PushLogRequest_Body)(nil), // 24: net.pb.PushLogRequest.Body - (*GetAllReplicatorReply_Replicators)(nil), // 25: net.pb.GetAllReplicatorReply.Replicators - (*GetAllReplicatorReply_Replicators_Info)(nil), // 26: net.pb.GetAllReplicatorReply.Replicators.Info - (*GetAllP2PCollectionsReply_Collection)(nil), // 27: net.pb.GetAllP2PCollectionsReply.Collection + (*Document)(nil), // 0: net.pb.Document + (*GetDocGraphRequest)(nil), // 1: net.pb.GetDocGraphRequest + (*GetDocGraphReply)(nil), // 2: net.pb.GetDocGraphReply + (*PushDocGraphRequest)(nil), // 3: net.pb.PushDocGraphRequest + (*PushDocGraphReply)(nil), // 4: net.pb.PushDocGraphReply + (*GetLogRequest)(nil), // 5: net.pb.GetLogRequest + (*GetLogReply)(nil), // 6: net.pb.GetLogReply + (*PushLogRequest)(nil), // 7: net.pb.PushLogRequest + (*GetHeadLogRequest)(nil), // 8: net.pb.GetHeadLogRequest + (*PushLogReply)(nil), // 9: net.pb.PushLogReply + (*GetHeadLogReply)(nil), // 10: net.pb.GetHeadLogReply + (*Document_Log)(nil), // 11: net.pb.Document.Log + (*PushLogRequest_Body)(nil), // 12: net.pb.PushLogRequest.Body } var file_net_proto_depIdxs = []int32{ - 24, // 0: net.pb.PushLogRequest.body:type_name -> net.pb.PushLogRequest.Body - 25, // 1: net.pb.GetAllReplicatorReply.replicators:type_name -> net.pb.GetAllReplicatorReply.Replicators - 27, // 2: net.pb.GetAllP2PCollectionsReply.collections:type_name -> net.pb.GetAllP2PCollectionsReply.Collection - 23, // 3: net.pb.PushLogRequest.Body.log:type_name -> net.pb.Document.Log - 26, // 4: net.pb.GetAllReplicatorReply.Replicators.info:type_name -> net.pb.GetAllReplicatorReply.Replicators.Info - 1, // 5: net.pb.Service.GetDocGraph:input_type -> net.pb.GetDocGraphRequest - 3, // 6: net.pb.Service.PushDocGraph:input_type -> net.pb.PushDocGraphRequest - 5, // 7: net.pb.Service.GetLog:input_type -> net.pb.GetLogRequest - 7, // 8: net.pb.Service.PushLog:input_type -> net.pb.PushLogRequest - 8, // 9: net.pb.Service.GetHeadLog:input_type -> net.pb.GetHeadLogRequest - 11, // 10: net.pb.Collection.SetReplicator:input_type -> net.pb.SetReplicatorRequest - 13, // 11: net.pb.Collection.DeleteReplicator:input_type -> 
net.pb.DeleteReplicatorRequest - 15, // 12: net.pb.Collection.GetAllReplicators:input_type -> net.pb.GetAllReplicatorRequest - 17, // 13: net.pb.Collection.AddP2PCollections:input_type -> net.pb.AddP2PCollectionsRequest - 19, // 14: net.pb.Collection.RemoveP2PCollections:input_type -> net.pb.RemoveP2PCollectionsRequest - 21, // 15: net.pb.Collection.GetAllP2PCollections:input_type -> net.pb.GetAllP2PCollectionsRequest - 2, // 16: net.pb.Service.GetDocGraph:output_type -> net.pb.GetDocGraphReply - 4, // 17: net.pb.Service.PushDocGraph:output_type -> net.pb.PushDocGraphReply - 6, // 18: net.pb.Service.GetLog:output_type -> net.pb.GetLogReply - 9, // 19: net.pb.Service.PushLog:output_type -> net.pb.PushLogReply - 10, // 20: net.pb.Service.GetHeadLog:output_type -> net.pb.GetHeadLogReply - 12, // 21: net.pb.Collection.SetReplicator:output_type -> net.pb.SetReplicatorReply - 14, // 22: net.pb.Collection.DeleteReplicator:output_type -> net.pb.DeleteReplicatorReply - 16, // 23: net.pb.Collection.GetAllReplicators:output_type -> net.pb.GetAllReplicatorReply - 18, // 24: net.pb.Collection.AddP2PCollections:output_type -> net.pb.AddP2PCollectionsReply - 20, // 25: net.pb.Collection.RemoveP2PCollections:output_type -> net.pb.RemoveP2PCollectionsReply - 22, // 26: net.pb.Collection.GetAllP2PCollections:output_type -> net.pb.GetAllP2PCollectionsReply - 16, // [16:27] is the sub-list for method output_type - 5, // [5:16] is the sub-list for method input_type - 5, // [5:5] is the sub-list for extension type_name - 5, // [5:5] is the sub-list for extension extendee - 0, // [0:5] is the sub-list for field type_name + 12, // 0: net.pb.PushLogRequest.body:type_name -> net.pb.PushLogRequest.Body + 11, // 1: net.pb.PushLogRequest.Body.log:type_name -> net.pb.Document.Log + 1, // 2: net.pb.Service.GetDocGraph:input_type -> net.pb.GetDocGraphRequest + 3, // 3: net.pb.Service.PushDocGraph:input_type -> net.pb.PushDocGraphRequest + 5, // 4: net.pb.Service.GetLog:input_type -> net.pb.GetLogRequest + 7, // 5: net.pb.Service.PushLog:input_type -> net.pb.PushLogRequest + 8, // 6: net.pb.Service.GetHeadLog:input_type -> net.pb.GetHeadLogRequest + 2, // 7: net.pb.Service.GetDocGraph:output_type -> net.pb.GetDocGraphReply + 4, // 8: net.pb.Service.PushDocGraph:output_type -> net.pb.PushDocGraphReply + 6, // 9: net.pb.Service.GetLog:output_type -> net.pb.GetLogReply + 9, // 10: net.pb.Service.PushLog:output_type -> net.pb.PushLogReply + 10, // 11: net.pb.Service.GetHeadLog:output_type -> net.pb.GetHeadLogReply + 7, // [7:12] is the sub-list for method output_type + 2, // [2:7] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_net_proto_init() } @@ -1697,150 +846,6 @@ func file_net_proto_init() { } } file_net_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*SetReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteReplicatorRequest); i { - case 0: - return &v.state - case 
1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*DeleteReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*AddP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*RemoveP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsRequest); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllP2PCollectionsReply); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Document_Log); i { case 0: return &v.state @@ -1852,7 +857,7 @@ func file_net_proto_init() { return nil } } - file_net_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { + file_net_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*PushLogRequest_Body); i { case 0: return &v.state @@ -1864,42 +869,6 @@ func file_net_proto_init() { return nil } } - file_net_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply_Replicators); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*GetAllReplicatorReply_Replicators_Info); i { - case 0: - return &v.state - case 1: - return &v.sizeCache - case 2: - return &v.unknownFields - default: - return nil - } - } - file_net_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { - 
switch v := v.(*GetAllP2PCollectionsReply_Collection); i {
- case 0:
- return &v.state
- case 1:
- return &v.sizeCache
- case 2:
- return &v.unknownFields
- default:
- return nil
- }
- }
 }
 type x struct{}
 out := protoimpl.TypeBuilder{
@@ -1907,9 +876,9 @@ func file_net_proto_init() {
 GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
 RawDescriptor: file_net_proto_rawDesc,
 NumEnums: 0,
- NumMessages: 28,
+ NumMessages: 13,
 NumExtensions: 0,
- NumServices: 2,
+ NumServices: 1,
 },
 GoTypes: file_net_proto_goTypes,
 DependencyIndexes: file_net_proto_depIdxs,
diff --git a/net/pb/net.proto
index a4799a1d89..45c38bb256 100644
--- a/net/pb/net.proto
+++ b/net/pb/net.proto
@@ -37,8 +37,8 @@ message PushLogRequest {
 bytes docKey = 1;
 // cid is the CID of the composite of the document.
 bytes cid = 2;
- // schemaID is the SchemaID of the collection that the document resides in.
- bytes schemaID = 3;
+ // schemaRoot is the SchemaRoot of the collection that the document resides in.
+ bytes schemaRoot = 3;
 // creator is the PeerID of the peer that created the log.
 string creator = 4;
 // log holds the block that represents the version of the document.
@@ -65,82 +65,3 @@ service Service {
 // GetHeadLog from this peer
 rpc GetHeadLog(GetHeadLogRequest) returns (GetHeadLogReply) {}
 }
-
-message SetReplicatorRequest {
- repeated string collections = 1;
- bytes addr = 2;
-}
-
-message SetReplicatorReply {
- bytes peerID = 1;
-}
-
-message DeleteReplicatorRequest {
- bytes peerID = 1;
- repeated string collections = 2;
-}
-
-message DeleteReplicatorReply {
- bytes peerID = 1;
-}
-
-message GetAllReplicatorRequest {}
-
-message GetAllReplicatorReply {
- message Replicators {
- message Info {
- bytes id = 1;
- bytes addrs = 2;
- }
- Info info = 1;
- repeated string schemas = 2;
- }
-
- repeated Replicators replicators = 1;
-
-}
-
-message AddP2PCollectionsRequest {
- repeated string collections = 1;
-}
-
-message AddP2PCollectionsReply {
- string err = 1;
-}
-
-message RemoveP2PCollectionsRequest {
- repeated string collections = 1;
-}
-
-message RemoveP2PCollectionsReply {
- string err = 1;
-}
-
-message GetAllP2PCollectionsRequest {}
-
-message GetAllP2PCollectionsReply {
- message Collection {
- string id = 1;
- string name = 2;
- }
- repeated Collection collections = 1;
-}
-
-
-// Collection is the peer-to-peer network API for document sync by replication and subscription to collections
-service Collection {
- // SetReplicator for this peer
- rpc SetReplicator(SetReplicatorRequest) returns (SetReplicatorReply) {}
-
- // DeleteReplicator for this peer
- rpc DeleteReplicator(DeleteReplicatorRequest) returns (DeleteReplicatorReply) {}
-
- // DeleteReplicator for this peer
- rpc GetAllReplicators(GetAllReplicatorRequest) returns (GetAllReplicatorReply) {}
-
- rpc AddP2PCollections(AddP2PCollectionsRequest) returns (AddP2PCollectionsReply) {}
-
- rpc RemoveP2PCollections(RemoveP2PCollectionsRequest) returns (RemoveP2PCollectionsReply) {}
-
- rpc GetAllP2PCollections(GetAllP2PCollectionsRequest) returns (GetAllP2PCollectionsReply) {}
-}
\ No newline at end of file
diff --git a/net/pb/net_grpc.pb.go
index e50cbec859..bad62cdad7 100644
--- a/net/pb/net_grpc.pb.go
+++ b/net/pb/net_grpc.pb.go
@@ -1,7 +1,7 @@
 // Code generated by protoc-gen-go-grpc. DO NOT EDIT.
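 // With the Collection service removed from net.proto, this file now only
 // carries the net.pb.Service RPCs (GetDocGraph, PushDocGraph, GetLog,
 // PushLog, GetHeadLog). As a minimal sketch of calling the remaining
 // PushLog RPC through the standard protoc-gen-go-grpc client (the addr,
 // ctx, and body values here are illustrative assumptions, not part of
 // this package):
 //
 //	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
 //	if err != nil {
 //		return err
 //	}
 //	defer conn.Close()
 //	client := NewServiceClient(conn)
 //	reply, err := client.PushLog(ctx, &PushLogRequest{Body: body})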
// versions: // - protoc-gen-go-grpc v1.3.0 -// - protoc v3.21.9 +// - protoc v3.12.4 // source: net.proto package net_pb @@ -265,284 +265,3 @@ var Service_ServiceDesc = grpc.ServiceDesc{ Streams: []grpc.StreamDesc{}, Metadata: "net.proto", } - -const ( - Collection_SetReplicator_FullMethodName = "/net.pb.Collection/SetReplicator" - Collection_DeleteReplicator_FullMethodName = "/net.pb.Collection/DeleteReplicator" - Collection_GetAllReplicators_FullMethodName = "/net.pb.Collection/GetAllReplicators" - Collection_AddP2PCollections_FullMethodName = "/net.pb.Collection/AddP2PCollections" - Collection_RemoveP2PCollections_FullMethodName = "/net.pb.Collection/RemoveP2PCollections" - Collection_GetAllP2PCollections_FullMethodName = "/net.pb.Collection/GetAllP2PCollections" -) - -// CollectionClient is the client API for Collection service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. -type CollectionClient interface { - // SetReplicator for this peer - SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) - // DeleteReplicator for this peer - DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) - // DeleteReplicator for this peer - GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) - AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) - RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) - GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) -} - -type collectionClient struct { - cc grpc.ClientConnInterface -} - -func NewCollectionClient(cc grpc.ClientConnInterface) CollectionClient { - return &collectionClient{cc} -} - -func (c *collectionClient) SetReplicator(ctx context.Context, in *SetReplicatorRequest, opts ...grpc.CallOption) (*SetReplicatorReply, error) { - out := new(SetReplicatorReply) - err := c.cc.Invoke(ctx, Collection_SetReplicator_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) DeleteReplicator(ctx context.Context, in *DeleteReplicatorRequest, opts ...grpc.CallOption) (*DeleteReplicatorReply, error) { - out := new(DeleteReplicatorReply) - err := c.cc.Invoke(ctx, Collection_DeleteReplicator_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) GetAllReplicators(ctx context.Context, in *GetAllReplicatorRequest, opts ...grpc.CallOption) (*GetAllReplicatorReply, error) { - out := new(GetAllReplicatorReply) - err := c.cc.Invoke(ctx, Collection_GetAllReplicators_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) AddP2PCollections(ctx context.Context, in *AddP2PCollectionsRequest, opts ...grpc.CallOption) (*AddP2PCollectionsReply, error) { - out := new(AddP2PCollectionsReply) - err := c.cc.Invoke(ctx, Collection_AddP2PCollections_FullMethodName, in, out, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) RemoveP2PCollections(ctx context.Context, in *RemoveP2PCollectionsRequest, opts ...grpc.CallOption) (*RemoveP2PCollectionsReply, error) { - out := new(RemoveP2PCollectionsReply) - err := c.cc.Invoke(ctx, Collection_RemoveP2PCollections_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *collectionClient) GetAllP2PCollections(ctx context.Context, in *GetAllP2PCollectionsRequest, opts ...grpc.CallOption) (*GetAllP2PCollectionsReply, error) { - out := new(GetAllP2PCollectionsReply) - err := c.cc.Invoke(ctx, Collection_GetAllP2PCollections_FullMethodName, in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// CollectionServer is the server API for Collection service. -// All implementations must embed UnimplementedCollectionServer -// for forward compatibility -type CollectionServer interface { - // SetReplicator for this peer - SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) - // DeleteReplicator for this peer - DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) - // DeleteReplicator for this peer - GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) - AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) - RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) - GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) - mustEmbedUnimplementedCollectionServer() -} - -// UnimplementedCollectionServer must be embedded to have forward compatible implementations. -type UnimplementedCollectionServer struct { -} - -func (UnimplementedCollectionServer) SetReplicator(context.Context, *SetReplicatorRequest) (*SetReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method SetReplicator not implemented") -} -func (UnimplementedCollectionServer) DeleteReplicator(context.Context, *DeleteReplicatorRequest) (*DeleteReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method DeleteReplicator not implemented") -} -func (UnimplementedCollectionServer) GetAllReplicators(context.Context, *GetAllReplicatorRequest) (*GetAllReplicatorReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllReplicators not implemented") -} -func (UnimplementedCollectionServer) AddP2PCollections(context.Context, *AddP2PCollectionsRequest) (*AddP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method AddP2PCollections not implemented") -} -func (UnimplementedCollectionServer) RemoveP2PCollections(context.Context, *RemoveP2PCollectionsRequest) (*RemoveP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method RemoveP2PCollections not implemented") -} -func (UnimplementedCollectionServer) GetAllP2PCollections(context.Context, *GetAllP2PCollectionsRequest) (*GetAllP2PCollectionsReply, error) { - return nil, status.Errorf(codes.Unimplemented, "method GetAllP2PCollections not implemented") -} -func (UnimplementedCollectionServer) mustEmbedUnimplementedCollectionServer() {} - -// UnsafeCollectionServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to CollectionServer will -// result in compilation errors. 
-type UnsafeCollectionServer interface { - mustEmbedUnimplementedCollectionServer() -} - -func RegisterCollectionServer(s grpc.ServiceRegistrar, srv CollectionServer) { - s.RegisterService(&Collection_ServiceDesc, srv) -} - -func _Collection_SetReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).SetReplicator(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_SetReplicator_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).SetReplicator(ctx, req.(*SetReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_DeleteReplicator_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).DeleteReplicator(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_DeleteReplicator_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).DeleteReplicator(ctx, req.(*DeleteReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_GetAllReplicators_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAllReplicatorRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).GetAllReplicators(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_GetAllReplicators_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).GetAllReplicators(ctx, req.(*GetAllReplicatorRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_AddP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).AddP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_AddP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).AddP2PCollections(ctx, req.(*AddP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_RemoveP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RemoveP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).RemoveP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_RemoveP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return 
srv.(CollectionServer).RemoveP2PCollections(ctx, req.(*RemoveP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _Collection_GetAllP2PCollections_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetAllP2PCollectionsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(CollectionServer).GetAllP2PCollections(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: Collection_GetAllP2PCollections_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(CollectionServer).GetAllP2PCollections(ctx, req.(*GetAllP2PCollectionsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// Collection_ServiceDesc is the grpc.ServiceDesc for Collection service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var Collection_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "net.pb.Collection", - HandlerType: (*CollectionServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "SetReplicator", - Handler: _Collection_SetReplicator_Handler, - }, - { - MethodName: "DeleteReplicator", - Handler: _Collection_DeleteReplicator_Handler, - }, - { - MethodName: "GetAllReplicators", - Handler: _Collection_GetAllReplicators_Handler, - }, - { - MethodName: "AddP2PCollections", - Handler: _Collection_AddP2PCollections_Handler, - }, - { - MethodName: "RemoveP2PCollections", - Handler: _Collection_RemoveP2PCollections_Handler, - }, - { - MethodName: "GetAllP2PCollections", - Handler: _Collection_GetAllP2PCollections_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "net.proto", -} diff --git a/net/pb/net_vtproto.pb.go b/net/pb/net_vtproto.pb.go index 9ac8b5c379..ae28bba13d 100644 --- a/net/pb/net_vtproto.pb.go +++ b/net/pb/net_vtproto.pb.go @@ -1,5 +1,5 @@ // Code generated by protoc-gen-go-vtproto. DO NOT EDIT. 
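 // The vtproto marshalers below build messages back to front: the payload
 // is copied first, then encodeVarint reserves sov(v) bytes immediately
 // before it for the length prefix, then the one-byte field tag is written.
 // A small worked example (the value 300 is chosen purely for illustration):
 //
 //	// 300 = 0b1_0010_1100; varints emit 7 bits per byte, low group first,
 //	// with the high bit as a continuation flag, so 300 encodes as 0xAC 0x02
 //	// and sov(300) == 2. A 300-byte schemaRoot field therefore costs
 //	// 1 (tag 0x1a) + 2 (length) + 300 (payload) bytes on the wire.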
-// protoc-gen-go-vtproto version: v0.4.0 +// protoc-gen-go-vtproto version: v0.5.0 // source: net.proto package net_pb @@ -350,10 +350,10 @@ func (m *PushLogRequest_Body) MarshalToSizedBufferVT(dAtA []byte) (int, error) { i-- dAtA[i] = 0x22 } - if len(m.SchemaID) > 0 { - i -= len(m.SchemaID) - copy(dAtA[i:], m.SchemaID) - i = encodeVarint(dAtA, i, uint64(len(m.SchemaID))) + if len(m.SchemaRoot) > 0 { + i -= len(m.SchemaRoot) + copy(dAtA[i:], m.SchemaRoot) + i = encodeVarint(dAtA, i, uint64(len(m.SchemaRoot))) i-- dAtA[i] = 0x1a } @@ -516,1939 +516,190 @@ func (m *GetHeadLogReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *SetReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err +func encodeVarint(dAtA []byte, offset int, v uint64) int { + offset -= sov(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ } - return dAtA[:n], nil -} - -func (m *SetReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + dAtA[offset] = uint8(v) + return base } - -func (m *SetReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Document_Log) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Addr) > 0 { - i -= len(m.Addr) - copy(dAtA[i:], m.Addr) - i = encodeVarint(dAtA, i, uint64(len(m.Addr))) - i-- - dAtA[i] = 0x12 - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *SetReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.Block) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - return dAtA[:n], nil -} - -func (m *SetReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + n += len(m.unknownFields) + return n } -func (m *SetReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *Document) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + l = len(m.DocKey) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + l = len(m.Head) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - return dAtA[:n], nil -} - -func (m *DeleteReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return 
m.MarshalToSizedBufferVT(dAtA[:size]) + n += len(m.unknownFields) + return n } -func (m *DeleteReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetDocGraphRequest) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0x12 - } - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *DeleteReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *DeleteReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + n += len(m.unknownFields) + return n } -func (m *DeleteReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetDocGraphReply) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.PeerID) > 0 { - i -= len(m.PeerID) - copy(dAtA[i:], m.PeerID) - i = encodeVarint(dAtA, i, uint64(len(m.PeerID))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PushDocGraphRequest) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - return len(dAtA) - i, nil + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorReply_Replicators_Info) MarshalVT() (dAtA []byte, err error) { +func (m *PushDocGraphReply) SizeVT() (n int) { if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + return 0 } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply_Replicators_Info) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + var l int + _ = l + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorReply_Replicators_Info) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *GetLogRequest) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Addrs) > 0 { - 
i -= len(m.Addrs) - copy(dAtA[i:], m.Addrs) - i = encodeVarint(dAtA, i, uint64(len(m.Addrs))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarint(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorReply_Replicators) MarshalVT() (dAtA []byte, err error) { +func (m *GetLogReply) SizeVT() (n int) { if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err + return 0 } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply_Replicators) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) + var l int + _ = l + n += len(m.unknownFields) + return n } -func (m *GetAllReplicatorReply_Replicators) MarshalToSizedBufferVT(dAtA []byte) (int, error) { +func (m *PushLogRequest_Body) SizeVT() (n int) { if m == nil { - return 0, nil + return 0 } - i := len(dAtA) - _ = i var l int _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) + l = len(m.DocKey) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - if len(m.Schemas) > 0 { - for iNdEx := len(m.Schemas) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Schemas[iNdEx]) - copy(dAtA[i:], m.Schemas[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Schemas[iNdEx]))) - i-- - dAtA[i] = 0x12 - } + l = len(m.Cid) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - if m.Info != nil { - size, err := m.Info.MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa + l = len(m.SchemaRoot) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - return len(dAtA) - i, nil -} - -func (m *GetAllReplicatorReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil + l = len(m.Creator) + if l > 0 { + n += 1 + l + sov(uint64(l)) } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllReplicatorReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllReplicatorReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Replicators) > 0 { - for iNdEx := len(m.Replicators) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Replicators[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AddP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *AddP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= 
len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *AddP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *AddP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *AddP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarint(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsRequest) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - i -= len(m.Collections[iNdEx]) - copy(dAtA[i:], m.Collections[iNdEx]) - i = encodeVarint(dAtA, i, uint64(len(m.Collections[iNdEx]))) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func (m *RemoveP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *RemoveP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *RemoveP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Err) > 0 { - i -= len(m.Err) - copy(dAtA[i:], m.Err) - i = encodeVarint(dAtA, i, uint64(len(m.Err))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsRequest) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsRequest) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsRequest) 
MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply_Collection) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Name) > 0 { - i -= len(m.Name) - copy(dAtA[i:], m.Name) - i = encodeVarint(dAtA, i, uint64(len(m.Name))) - i-- - dAtA[i] = 0x12 - } - if len(m.Id) > 0 { - i -= len(m.Id) - copy(dAtA[i:], m.Id) - i = encodeVarint(dAtA, i, uint64(len(m.Id))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - -func (m *GetAllP2PCollectionsReply) MarshalVT() (dAtA []byte, err error) { - if m == nil { - return nil, nil - } - size := m.SizeVT() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBufferVT(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *GetAllP2PCollectionsReply) MarshalToVT(dAtA []byte) (int, error) { - size := m.SizeVT() - return m.MarshalToSizedBufferVT(dAtA[:size]) -} - -func (m *GetAllP2PCollectionsReply) MarshalToSizedBufferVT(dAtA []byte) (int, error) { - if m == nil { - return 0, nil - } - i := len(dAtA) - _ = i - var l int - _ = l - if m.unknownFields != nil { - i -= len(m.unknownFields) - copy(dAtA[i:], m.unknownFields) - } - if len(m.Collections) > 0 { - for iNdEx := len(m.Collections) - 1; iNdEx >= 0; iNdEx-- { - size, err := m.Collections[iNdEx].MarshalToSizedBufferVT(dAtA[:i]) - if err != nil { - return 0, err - } - i -= size - i = encodeVarint(dAtA, i, uint64(size)) - i-- - dAtA[i] = 0xa - } - } - return len(dAtA) - i, nil -} - -func encodeVarint(dAtA []byte, offset int, v uint64) int { - offset -= sov(v) - base := offset - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return base -} -func (m *Document_Log) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Block) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *Document) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DocKey) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Head) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetDocGraphRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetDocGraphReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *PushDocGraphRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *PushDocGraphReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += 
len(m.unknownFields) - return n -} - -func (m *GetLogRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetLogReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *PushLogRequest_Body) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.DocKey) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Cid) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.SchemaID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Creator) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if m.Log != nil { - l = m.Log.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *PushLogRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Body != nil { - l = m.Body.SizeVT() - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetHeadLogRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *PushLogReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetHeadLogReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *SetReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - l = len(m.Addr) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *SetReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *DeleteReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *DeleteReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.PeerID) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply_Replicators_Info) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Addrs) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply_Replicators) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if m.Info != nil { - l = m.Info.SizeVT() - n += 1 + l + sov(uint64(l)) - } - if len(m.Schemas) > 0 { - for _, s := range m.Schemas { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllReplicatorReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Replicators) > 0 { - for _, e := range m.Replicators { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += 
len(m.unknownFields) - return n -} - -func (m *AddP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *AddP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *RemoveP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, s := range m.Collections { - l = len(s) - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func (m *RemoveP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Err) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsRequest) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsReply_Collection) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Id) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - l = len(m.Name) - if l > 0 { - n += 1 + l + sov(uint64(l)) - } - n += len(m.unknownFields) - return n -} - -func (m *GetAllP2PCollectionsReply) SizeVT() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - if len(m.Collections) > 0 { - for _, e := range m.Collections { - l = e.SizeVT() - n += 1 + l + sov(uint64(l)) - } - } - n += len(m.unknownFields) - return n -} - -func sov(x uint64) (n int) { - return (bits.Len64(x|1) + 6) / 7 -} -func soz(x uint64) (n int) { - return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Document_Log) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Document_Log: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Document_Log: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) - if m.Block == nil { - m.Block = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Document) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Document: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Document: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) - if m.DocKey == nil { - m.DocKey = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Head", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Head = append(m.Head[:0], dAtA[iNdEx:postIndex]...) - if m.Head == nil { - m.Head = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetDocGraphRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetDocGraphRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetDocGraphReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetDocGraphReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PushDocGraphRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushDocGraphRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PushDocGraphReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushDocGraphReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetLogRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetLogReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetLogReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PushLogRequest_Body) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushLogRequest_Body: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogRequest_Body: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) 
- if m.DocKey == nil { - m.DocKey = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) - if m.Cid == nil { - m.Cid = []byte{} - } - iNdEx = postIndex - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field SchemaID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.SchemaID = append(m.SchemaID[:0], dAtA[iNdEx:postIndex]...) - if m.SchemaID == nil { - m.SchemaID = []byte{} - } - iNdEx = postIndex - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Creator = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Log == nil { - m.Log = &Document_Log{} - } - if err := m.Log.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
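// Length-delimited fields (wire type 2) above all decode the same way: read
// the payload length as a varint, bounds-check postIndex against the buffer,
// then copy. Bytes fields are copied with append(dst[:0], ...) and normalized
// to a non-nil empty slice, keeping a present-but-empty field distinguishable
// from an absent one, while string fields copy via the string() conversion.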
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PushLogRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PushLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Body == nil { - m.Body = &PushLogRequest_Body{} - } - if err := m.Body.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetHeadLogRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetHeadLogRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetHeadLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
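// Nested messages, such as PushLogRequest.Body above, decode recursively:
// the parent allocates the field if it is nil and hands the length-delimited
// sub-slice to the child's own UnmarshalVT. The SizeVT methods below do the
// matching bookkeeping for encoding: each length-delimited field costs one
// tag byte plus sov(len) length bytes plus the payload, where
// sov(x) = (bits.Len64(x|1)+6)/7 is the varint-encoded size of an integer.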
- iNdEx += skippy - } + if m.Log != nil { + l = m.Log.SizeVT() + n += 1 + l + sov(uint64(l)) } + n += len(m.unknownFields) + return n +} - if iNdEx > l { - return io.ErrUnexpectedEOF +func (m *PushLogRequest) SizeVT() (n int) { + if m == nil { + return 0 } - return nil + var l int + _ = l + if m.Body != nil { + l = m.Body.SizeVT() + n += 1 + l + sov(uint64(l)) + } + n += len(m.unknownFields) + return n } -func (m *PushLogReply) UnmarshalVT(dAtA []byte) error { + +func (m *GetHeadLogRequest) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *PushLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func (m *GetHeadLogReply) SizeVT() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + n += len(m.unknownFields) + return n +} + +func sov(x uint64) (n int) { + return (bits.Len64(x|1) + 6) / 7 +} +func soz(x uint64) (n int) { + return sov(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Document_Log) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2471,63 +722,46 @@ func (m *PushLogReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: PushLogReply: wiretype end group for non-group") + return fmt.Errorf("proto: Document_Log: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: PushLogReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Document_Log: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Block", wireType) } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + if byteLen < 0 { + return ErrInvalidLength } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *GetHeadLogReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength } - if iNdEx >= l { + if postIndex > l { return io.ErrUnexpectedEOF } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.Block = append(m.Block[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Block == nil { + m.Block = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: GetHeadLogReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: GetHeadLogReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2550,7 +784,7 @@ func (m *GetHeadLogReply) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { +func (m *Document) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2573,17 +807,17 @@ func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorRequest: wiretype end group for non-group") + return fmt.Errorf("proto: Document: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: Document: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -2593,27 +827,29 @@ func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) + if m.DocKey == nil { + m.DocKey = []byte{} + } iNdEx = postIndex - case 2: + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addr", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Head", wireType) } var byteLen int for shift := uint(0); ; shift += 7 { @@ -2640,9 +876,9 @@ func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Addr = append(m.Addr[:0], dAtA[iNdEx:postIndex]...) - if m.Addr == nil { - m.Addr = []byte{} + m.Head = append(m.Head[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Head == nil { + m.Head = []byte{} } iNdEx = postIndex default: @@ -2667,7 +903,7 @@ func (m *SetReplicatorRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { +func (m *GetDocGraphRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2690,46 +926,12 @@ func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: SetReplicatorReply: wiretype end group for non-group") + return fmt.Errorf("proto: GetDocGraphRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: SetReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) - if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2752,7 +954,7 @@ func (m *SetReplicatorReply) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetDocGraphReply) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2772,81 +974,15 @@ func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { break } } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
- if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GetDocGraphReply: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GetDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2869,7 +1005,7 @@ func (m *DeleteReplicatorRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { +func (m *PushDocGraphRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2892,46 +1028,12 @@ func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: DeleteReplicatorReply: wiretype end group for non-group") + return fmt.Errorf("proto: PushDocGraphRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: DeleteReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushDocGraphRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerID", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.PeerID = append(m.PeerID[:0], dAtA[iNdEx:postIndex]...) 
- if m.PeerID == nil { - m.PeerID = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -2954,7 +1056,7 @@ func (m *DeleteReplicatorReply) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { +func (m *PushDocGraphReply) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -2977,10 +1079,10 @@ func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorRequest: wiretype end group for non-group") + return fmt.Errorf("proto: PushDocGraphReply: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushDocGraphReply: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -3005,7 +1107,7 @@ func (m *GetAllReplicatorRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error { +func (m *GetLogRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3028,80 +1130,12 @@ func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: wiretype end group for non-group") + return fmt.Errorf("proto: GetLogRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators_Info: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = append(m.Id[:0], dAtA[iNdEx:postIndex]...) - if m.Id == nil { - m.Id = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Addrs", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + byteLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Addrs = append(m.Addrs[:0], dAtA[iNdEx:postIndex]...) 
- if m.Addrs == nil { - m.Addrs = []byte{} - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -3124,7 +1158,7 @@ func (m *GetAllReplicatorReply_Replicators_Info) UnmarshalVT(dAtA []byte) error } return nil } -func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { +func (m *GetLogReply) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3147,80 +1181,12 @@ func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: wiretype end group for non-group") + return fmt.Errorf("proto: GetLogReply: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply_Replicators: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetLogReply: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Info == nil { - m.Info = &GetAllReplicatorReply_Replicators_Info{} - } - if err := m.Info.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Schemas", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Schemas = append(m.Schemas, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -3243,7 +1209,7 @@ func (m *GetAllReplicatorReply_Replicators) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { +func (m *PushLogRequest_Body) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3266,17 +1232,17 @@ func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllReplicatorReply: wiretype end group for non-group") + return fmt.Errorf("proto: PushLogRequest_Body: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllReplicatorReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushLogRequest_Body: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Replicators", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field DocKey", wireType) } - var msglen int + var 
byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3286,82 +1252,65 @@ func (m *GetAllReplicatorReply) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - msglen |= int(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - if msglen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + msglen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Replicators = append(m.Replicators, &GetAllReplicatorReply_Replicators{}) - if err := m.Replicators[len(m.Replicators)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err + m.DocKey = append(m.DocKey[:0], dAtA[iNdEx:postIndex]...) + if m.DocKey == nil { + m.DocKey = []byte{} } iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Cid", wireType) } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflow + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } } - if iNdEx >= l { - return io.ErrUnexpectedEOF + if byteLen < 0 { + return ErrInvalidLength } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLength } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Cid = append(m.Cid[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Cid == nil { + m.Cid = []byte{} + } + iNdEx = postIndex + case 3: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field SchemaRoot", wireType) } - var stringLen uint64 + var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3371,78 +1320,29 @@ func (m *AddP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + byteLen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if byteLen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + byteLen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break + m.SchemaRoot = append(m.SchemaRoot[:0], dAtA[iNdEx:postIndex]...) + if m.SchemaRoot == nil { + m.SchemaRoot = []byte{} } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: AddP2PCollectionsReply: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: AddP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + iNdEx = postIndex + case 4: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Creator", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { @@ -3470,64 +1370,13 @@ func (m *AddP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Err = string(dAtA[iNdEx:postIndex]) + m.Creator = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skip(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLength - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.unknownFields = append(m.unknownFields, dAtA[iNdEx:iNdEx+skippy]...) 
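// In the regenerated PushLogRequest_Body decoder above, field 3 is renamed
// from SchemaID to SchemaRoot; its tag number and bytes wire format are
// unchanged, so previously encoded messages remain wire-compatible.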
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: + case 6: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Log", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3537,23 +1386,27 @@ func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Collections = append(m.Collections, string(dAtA[iNdEx:postIndex])) + if m.Log == nil { + m.Log = &Document_Log{} + } + if err := m.Log.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3577,7 +1430,7 @@ func (m *RemoveP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { +func (m *PushLogRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3600,17 +1453,17 @@ func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: wiretype end group for non-group") + return fmt.Errorf("proto: PushLogRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: RemoveP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Err", wireType) + return fmt.Errorf("proto: wrong wireType = %d for field Body", wireType) } - var stringLen uint64 + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflow @@ -3620,23 +1473,27 @@ func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - stringLen |= uint64(b&0x7F) << shift + msglen |= int(b&0x7F) << shift if b < 0x80 { break } } - intStringLen := int(stringLen) - if intStringLen < 0 { + if msglen < 0 { return ErrInvalidLength } - postIndex := iNdEx + intStringLen + postIndex := iNdEx + msglen if postIndex < 0 { return ErrInvalidLength } if postIndex > l { return io.ErrUnexpectedEOF } - m.Err = string(dAtA[iNdEx:postIndex]) + if m.Body == nil { + m.Body = 
&PushLogRequest_Body{} + } + if err := m.Body.UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { + return err + } iNdEx = postIndex default: iNdEx = preIndex @@ -3660,7 +1517,7 @@ func (m *RemoveP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { +func (m *GetHeadLogRequest) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3683,10 +1540,10 @@ func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: wiretype end group for non-group") + return fmt.Errorf("proto: GetHeadLogRequest: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetHeadLogRequest: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: @@ -3711,7 +1568,7 @@ func (m *GetAllP2PCollectionsRequest) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { +func (m *PushLogReply) UnmarshalVT(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3734,76 +1591,12 @@ func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: wiretype end group for non-group") + return fmt.Errorf("proto: PushLogReply: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply_Collection: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: PushLogReply: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Id = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Name = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) @@ -3826,7 +1619,7 @@ func (m *GetAllP2PCollectionsReply_Collection) UnmarshalVT(dAtA []byte) error { } return nil } -func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { +func (m *GetHeadLogReply) UnmarshalVT(dAtA []byte) error { l := 
len(dAtA) iNdEx := 0 for iNdEx < l { @@ -3849,46 +1642,12 @@ func (m *GetAllP2PCollectionsReply) UnmarshalVT(dAtA []byte) error { fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: wiretype end group for non-group") + return fmt.Errorf("proto: GetHeadLogReply: wiretype end group for non-group") } if fieldNum <= 0 { - return fmt.Errorf("proto: GetAllP2PCollectionsReply: illegal tag %d (wire type %d)", fieldNum, wire) + return fmt.Errorf("proto: GetHeadLogReply: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Collections", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflow - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= int(b&0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLength - } - postIndex := iNdEx + msglen - if postIndex < 0 { - return ErrInvalidLength - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Collections = append(m.Collections, &GetAllP2PCollectionsReply_Collection{}) - if err := m.Collections[len(m.Collections)-1].UnmarshalVT(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex default: iNdEx = preIndex skippy, err := skip(dAtA[iNdEx:]) diff --git a/net/peer.go b/net/peer.go index 26a24a38ae..2e702584a8 100644 --- a/net/peer.go +++ b/net/peer.go @@ -14,7 +14,6 @@ package net import ( "context" - "fmt" "sync" "time" @@ -32,10 +31,7 @@ import ( "github.com/libp2p/go-libp2p/core/peer" peerstore "github.com/libp2p/go-libp2p/core/peerstore" "github.com/libp2p/go-libp2p/core/routing" - ma "github.com/multiformats/go-multiaddr" "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core" @@ -86,8 +82,6 @@ type Peer struct { ctx context.Context cancel context.CancelFunc - - pb.UnimplementedCollectionServer } // NewPeer creates a new instance of the DefraDB server as a peer-to-peer node. @@ -97,7 +91,6 @@ func NewPeer( h host.Host, dht routing.Routing, ps *pubsub.PubSub, - tcpAddr ma.Multiaddr, serverOptions []grpc.ServerOption, dialOptions []grpc.DialOption, ) (*Peer, error) { @@ -200,7 +193,7 @@ func (p *Peer) Start() error { } // Close the peer node and all its internal workers/goroutines/loops. 
-func (p *Peer) Close() error { +func (p *Peer) Close() { // close topics if err := p.server.removeAllPubsubTopics(); err != nil { log.ErrorE(p.ctx, "Error closing pubsub topics", err) @@ -240,7 +233,6 @@ func (p *Peer) Close() error { } p.cancel() - return nil } // handleBroadcast loop manages the transition of messages @@ -277,7 +269,7 @@ func (p *Peer) RegisterNewDocument( dockey client.DocKey, c cid.Cid, nd ipld.Node, - schemaID string, + schemaRoot string, ) error { log.Debug( p.ctx, @@ -286,7 +278,7 @@ func (p *Peer) RegisterNewDocument( ) // register topic - if err := p.server.addPubSubTopic(dockey.String(), !p.server.hasPubSubTopic(schemaID)); err != nil { + if err := p.server.addPubSubTopic(dockey.String(), !p.server.hasPubSubTopic(schemaRoot)); err != nil { log.ErrorE( p.ctx, "Failed to create new pubsub topic", @@ -298,10 +290,10 @@ func (p *Peer) RegisterNewDocument( // publish log body := &pb.PushLogRequest_Body{ - DocKey: []byte(dockey.String()), - Cid: c.Bytes(), - SchemaID: []byte(schemaID), - Creator: p.host.ID().String(), + DocKey: []byte(dockey.String()), + Cid: c.Bytes(), + SchemaRoot: []byte(schemaRoot), + Creator: p.host.ID().String(), Log: &pb.Document_Log{ Block: nd.RawData(), }, @@ -310,154 +302,7 @@ func (p *Peer) RegisterNewDocument( Body: body, } - return p.server.publishLog(p.ctx, schemaID, req) -} - -func marshalPeerID(id peer.ID) []byte { - b, _ := id.Marshal() // This will never return an error - return b -} - -// SetReplicator adds a target peer node as a replication destination for documents in our DB. -func (p *Peer) SetReplicator( - ctx context.Context, - req *pb.SetReplicatorRequest, -) (*pb.SetReplicatorReply, error) { - addr, err := ma.NewMultiaddrBytes(req.Addr) - if err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - txn, err := p.db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - store := p.db.WithTxn(txn) - - pid, err := p.setReplicator(ctx, store, addr, req.Collections...) - if err != nil { - txn.Discard(ctx) - return nil, err - } - - return &pb.SetReplicatorReply{ - PeerID: marshalPeerID(pid), - }, txn.Commit(ctx) -} - -// setReplicator adds a target peer node as a replication destination for documents in our DB. -func (p *Peer) setReplicator( - ctx context.Context, - store client.Store, - paddr ma.Multiaddr, - collectionNames ...string, -) (peer.ID, error) { - var pid peer.ID - - // verify collections - collections := []client.Collection{} - schemas := []string{} - if len(collectionNames) == 0 { - var err error - collections, err = store.GetAllCollections(ctx) - if err != nil { - return pid, errors.Wrap("failed to get all collections for replicator", err) - } - for _, col := range collections { - schemas = append(schemas, col.SchemaID()) - } - } else { - for _, cName := range collectionNames { - col, err := store.GetCollectionByName(ctx, cName) - if err != nil { - return pid, errors.Wrap("failed to get collection for replicator", err) - } - collections = append(collections, col) - schemas = append(schemas, col.SchemaID()) - } - } - - // extra peerID - // Extract peer portion - p2p, err := paddr.ValueForProtocol(ma.P_P2P) - if err != nil { - return pid, err - } - pid, err = peer.Decode(p2p) - if err != nil { - return pid, err - } - - // make sure it's not ourselves - if pid == p.host.ID() { - return pid, errors.New("can't target ourselves as a replicator") - } - - // add peer to peerstore - // Extract the peer ID from the multiaddr. 
- info, err := peer.AddrInfoFromP2pAddr(paddr) - if err != nil { - return pid, errors.Wrap(fmt.Sprintf("Failed to address info from %s", paddr), err) - } - - // Add the destination's peer multiaddress in the peerstore. - // This will be used during connection and stream creation by libp2p. - p.host.Peerstore().AddAddrs(info.ID, info.Addrs, peerstore.PermanentAddrTTL) - - // make sure we're not duplicating things - p.mu.Lock() - for _, col := range collections { - if reps, exists := p.replicators[col.SchemaID()]; exists { - if _, exists := reps[pid]; exists { - p.mu.Unlock() - return pid, errors.New(fmt.Sprintf( - "Replicator already exists for %s with PeerID %s", - col.Name(), - pid, - )) - } - } else { - p.replicators[col.SchemaID()] = make(map[peer.ID]struct{}) - } - // add to replicators list for the collection - p.replicators[col.SchemaID()][pid] = struct{}{} - } - p.mu.Unlock() - - // Persist peer in datastore - err = p.db.SetReplicator(ctx, client.Replicator{ - Info: *info, - Schemas: schemas, - }) - if err != nil { - return pid, errors.Wrap("failed to persist replicator", err) - } - - for _, col := range collections { - // create read only txn and assign to col - txn, err := p.db.NewTxn(ctx, true) - if err != nil { - return pid, errors.Wrap("failed to get txn", err) - } - col = col.WithTxn(txn) - - // get dockeys (all) - keysCh, err := col.GetAllDocKeys(ctx) - if err != nil { - txn.Discard(ctx) - return pid, errors.Wrap( - fmt.Sprintf( - "Failed to get dockey for replicator %s on %s", - pid, - col.Name(), - ), - err, - ) - } - - p.pushToReplicator(ctx, txn, col, keysCh, pid) - } - return pid, nil + return p.server.publishLog(p.ctx, schemaRoot, req) } func (p *Peer) pushToReplicator( @@ -507,11 +352,11 @@ func (p *Peer) pushToReplicator( } evt := events.Update{ - DocKey: key.Key.String(), - Cid: c, - SchemaID: collection.SchemaID(), - Block: nd, - Priority: priority, + DocKey: key.Key.String(), + Cid: c, + SchemaRoot: collection.SchemaRoot(), + Block: nd, + Priority: priority, } if err := p.server.pushLog(ctx, evt, pid); err != nil { log.ErrorE( @@ -526,122 +371,8 @@ func (p *Peer) pushToReplicator( } } -// DeleteReplicator removes a peer node from the replicators. -func (p *Peer) DeleteReplicator( - ctx context.Context, - req *pb.DeleteReplicatorRequest, -) (*pb.DeleteReplicatorReply, error) { - log.Debug(ctx, "Received DeleteReplicator request") - - txn, err := p.db.NewTxn(ctx, true) - if err != nil { - return nil, err - } - store := p.db.WithTxn(txn) - - err = p.deleteReplicator(ctx, store, peer.ID(req.PeerID), req.Collections...) 
- if err != nil { - txn.Discard(ctx) - return nil, err - } - - return &pb.DeleteReplicatorReply{ - PeerID: req.PeerID, - }, txn.Commit(ctx) -} - -func (p *Peer) deleteReplicator( - ctx context.Context, - store client.Store, - pid peer.ID, - collectionNames ...string, -) error { - // make sure it's not ourselves - if pid == p.host.ID() { - return ErrSelfTargetForReplicator - } - - // verify collections - schemas := []string{} - schemaMap := make(map[string]struct{}) - if len(collectionNames) == 0 { - var err error - collections, err := store.GetAllCollections(ctx) - if err != nil { - return errors.Wrap("failed to get all collections for replicator", err) - } - for _, col := range collections { - schemas = append(schemas, col.SchemaID()) - schemaMap[col.SchemaID()] = struct{}{} - } - } else { - for _, cName := range collectionNames { - col, err := store.GetCollectionByName(ctx, cName) - if err != nil { - return errors.Wrap("failed to get collection for replicator", err) - } - schemas = append(schemas, col.SchemaID()) - schemaMap[col.SchemaID()] = struct{}{} - } - } - - // make sure we're not duplicating things - p.mu.Lock() - defer p.mu.Unlock() - - totalSchemas := 0 // Lets keep track of how many schemas are left for the replicator. - for schema, rep := range p.replicators { - if _, exists := rep[pid]; exists { - if _, toDelete := schemaMap[schema]; toDelete { - delete(p.replicators[schema], pid) - } else { - totalSchemas++ - } - } - } - - if totalSchemas == 0 { - // Remove the destination's peer multiaddress in the peerstore. - p.host.Peerstore().ClearAddrs(pid) - } - - // Delete peer in datastore - return p.db.DeleteReplicator(ctx, client.Replicator{ - Info: peer.AddrInfo{ID: pid}, - Schemas: schemas, - }) -} - -// GetAllReplicators returns all replicators and the schemas that are replicated to them. -func (p *Peer) GetAllReplicators( - ctx context.Context, - req *pb.GetAllReplicatorRequest, -) (*pb.GetAllReplicatorReply, error) { - log.Debug(ctx, "Received GetAllReplicators request") - - reps, err := p.db.GetAllReplicators(ctx) - if err != nil { - return nil, err - } - - pbReps := []*pb.GetAllReplicatorReply_Replicators{} - for _, rep := range reps { - pbReps = append(pbReps, &pb.GetAllReplicatorReply_Replicators{ - Info: &pb.GetAllReplicatorReply_Replicators_Info{ - Id: []byte(rep.Info.ID), - Addrs: rep.Info.Addrs[0].Bytes(), - }, - Schemas: rep.Schemas, - }) - } - - return &pb.GetAllReplicatorReply{ - Replicators: pbReps, - }, nil -} - func (p *Peer) loadReplicators(ctx context.Context) error { - reps, err := p.db.GetAllReplicators(ctx) + reps, err := p.GetAllReplicators(ctx) if err != nil { return errors.Wrap("failed to get replicators", err) } @@ -672,7 +403,7 @@ func (p *Peer) loadReplicators(ctx context.Context) error { } func (p *Peer) loadP2PCollections(ctx context.Context) (map[string]struct{}, error) { - collections, err := p.db.GetAllP2PCollections(ctx) + collections, err := p.GetAllP2PCollections(ctx) if err != nil && !errors.Is(err, ds.ErrNotFound) { return nil, err } @@ -696,7 +427,7 @@ func (p *Peer) handleDocCreateLog(evt events.Update) error { // We need to register the document before pushing to the replicators if we want to // ensure that we have subscribed to the topic. 
- err = p.RegisterNewDocument(p.ctx, dockey, evt.Cid, evt.Block, evt.SchemaID) + err = p.RegisterNewDocument(p.ctx, dockey, evt.Cid, evt.Block, evt.SchemaRoot) if err != nil { return err } @@ -716,13 +447,13 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { "Preparing pubsub pushLog request from broadcast", logging.NewKV("DocKey", dockey), logging.NewKV("CID", evt.Cid), - logging.NewKV("SchemaId", evt.SchemaID)) + logging.NewKV("SchemaRoot", evt.SchemaRoot)) body := &pb.PushLogRequest_Body{ - DocKey: []byte(dockey.String()), - Cid: evt.Cid.Bytes(), - SchemaID: []byte(evt.SchemaID), - Creator: p.host.ID().String(), + DocKey: []byte(dockey.String()), + Cid: evt.Cid.Bytes(), + SchemaRoot: []byte(evt.SchemaRoot), + Creator: p.host.ID().String(), Log: &pb.Document_Log{ Block: evt.Block.RawData(), }, @@ -738,8 +469,8 @@ func (p *Peer) handleDocUpdateLog(evt events.Update) error { return NewErrPublishingToDockeyTopic(err, evt.Cid.String(), evt.DocKey) } - if err := p.server.publishLog(p.ctx, evt.SchemaID, req); err != nil { - return NewErrPublishingToSchemaTopic(err, evt.Cid.String(), evt.SchemaID) + if err := p.server.publishLog(p.ctx, evt.SchemaRoot, req); err != nil { + return NewErrPublishingToSchemaTopic(err, evt.Cid.String(), evt.SchemaRoot) } return nil @@ -751,12 +482,12 @@ func (p *Peer) pushLogToReplicators(ctx context.Context, lg events.Update) { for _, peer := range p.ps.ListPeers(lg.DocKey) { peers[peer.String()] = struct{}{} } - for _, peer := range p.ps.ListPeers(lg.SchemaID) { + for _, peer := range p.ps.ListPeers(lg.SchemaRoot) { peers[peer.String()] = struct{}{} } p.mu.Lock() - reps, exists := p.replicators[lg.SchemaID] + reps, exists := p.replicators[lg.SchemaRoot] p.mu.Unlock() if exists { @@ -849,186 +580,3 @@ func (p *Peer) rollbackRemovePubSubTopics(topics []string, cause error) error { } return cause } - -// AddP2PCollections adds the given collectionIDs to the pubsup topics. -// -// It will error if any of the given collectionIDs are invalid, in such a case some of the -// changes to the server may still be applied. -// -// WARNING: Calling this on collections with a large number of documents may take a long time to process. -func (p *Peer) AddP2PCollections( - ctx context.Context, - req *pb.AddP2PCollectionsRequest, -) (*pb.AddP2PCollectionsReply, error) { - log.Debug(ctx, "Received AddP2PCollections request") - - txn, err := p.db.NewTxn(p.ctx, false) - if err != nil { - return nil, err - } - defer txn.Discard(p.ctx) - store := p.db.WithTxn(txn) - - // first let's make sure the collections actually exists - storeCollections := []client.Collection{} - for _, col := range req.Collections { - storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) - if err != nil { - return nil, err - } - storeCollections = append(storeCollections, storeCol) - } - - // Ensure we can add all the collections to the store on the transaction - // before adding to topics. - for _, col := range req.Collections { - err := store.AddP2PCollection(p.ctx, col) - if err != nil { - return nil, err - } - } - - // Add pubsub topics and remove them if we get an error. - addedTopics := []string{} - for _, col := range req.Collections { - err = p.server.addPubSubTopic(col, true) - if err != nil { - return nil, p.rollbackAddPubSubTopics(addedTopics, err) - } - addedTopics = append(addedTopics, col) - } - - // After adding the collection topics, we remove the collections' documents - // from the pubsub topics to avoid receiving duplicate events. 
- removedTopics := []string{} - for _, col := range storeCollections { - keyChan, err := col.GetAllDocKeys(p.ctx) - if err != nil { - return nil, err - } - for key := range keyChan { - err := p.server.removePubSubTopic(key.Key.String()) - if err != nil { - return nil, p.rollbackRemovePubSubTopics(removedTopics, err) - } - removedTopics = append(removedTopics, key.Key.String()) - } - } - - if err = txn.Commit(p.ctx); err != nil { - err = p.rollbackRemovePubSubTopics(removedTopics, err) - return nil, p.rollbackAddPubSubTopics(addedTopics, err) - } - - return &pb.AddP2PCollectionsReply{}, nil -} - -// RemoveP2PCollections removes the given collectionIDs from the pubsup topics. -// -// It will error if any of the given collectionIDs are invalid, in such a case some of the -// changes to the server may still be applied. -// -// WARNING: Calling this on collections with a large number of documents may take a long time to process. -func (p *Peer) RemoveP2PCollections( - ctx context.Context, - req *pb.RemoveP2PCollectionsRequest, -) (*pb.RemoveP2PCollectionsReply, error) { - log.Debug(ctx, "Received RemoveP2PCollections request") - - txn, err := p.db.NewTxn(p.ctx, false) - if err != nil { - return nil, err - } - defer txn.Discard(p.ctx) - store := p.db.WithTxn(txn) - - // first let's make sure the collections actually exists - storeCollections := []client.Collection{} - for _, col := range req.Collections { - storeCol, err := store.GetCollectionBySchemaID(p.ctx, col) - if err != nil { - return nil, err - } - storeCollections = append(storeCollections, storeCol) - } - - // Ensure we can remove all the collections to the store on the transaction - // before adding to topics. - for _, col := range req.Collections { - err := store.RemoveP2PCollection(p.ctx, col) - if err != nil { - return nil, err - } - } - - // Remove pubsub topics and add them back if we get an error. - removedTopics := []string{} - for _, col := range req.Collections { - err = p.server.removePubSubTopic(col) - if err != nil { - return nil, p.rollbackRemovePubSubTopics(removedTopics, err) - } - removedTopics = append(removedTopics, col) - } - - // After removing the collection topics, we add back the collections' documents - // to the pubsub topics. 
-	addedTopics := []string{}
-	for _, col := range storeCollections {
-		keyChan, err := col.GetAllDocKeys(p.ctx)
-		if err != nil {
-			return nil, err
-		}
-		for key := range keyChan {
-			err := p.server.addPubSubTopic(key.Key.String(), true)
-			if err != nil {
-				return nil, p.rollbackAddPubSubTopics(addedTopics, err)
-			}
-			addedTopics = append(addedTopics, key.Key.String())
-		}
-	}
-
-	if err = txn.Commit(p.ctx); err != nil {
-		err = p.rollbackAddPubSubTopics(addedTopics, err)
-		return nil, p.rollbackRemovePubSubTopics(removedTopics, err)
-	}
-
-	return &pb.RemoveP2PCollectionsReply{}, nil
-}
-
-// GetAllP2PCollections gets all the collectionIDs from the pubsup topics
-func (p *Peer) GetAllP2PCollections(
-	ctx context.Context,
-	req *pb.GetAllP2PCollectionsRequest,
-) (*pb.GetAllP2PCollectionsReply, error) {
-	log.Debug(ctx, "Received GetAllP2PCollections request")
-
-	txn, err := p.db.NewTxn(p.ctx, false)
-	if err != nil {
-		return nil, err
-	}
-	store := p.db.WithTxn(txn)
-
-	collections, err := p.db.GetAllP2PCollections(p.ctx)
-	if err != nil {
-		txn.Discard(p.ctx)
-		return nil, err
-	}
-
-	pbCols := []*pb.GetAllP2PCollectionsReply_Collection{}
-	for _, colID := range collections {
-		col, err := store.GetCollectionBySchemaID(p.ctx, colID)
-		if err != nil {
-			txn.Discard(p.ctx)
-			return nil, err
-		}
-		pbCols = append(pbCols, &pb.GetAllP2PCollectionsReply_Collection{
-			Id: colID,
-			Name: col.Name(),
-		})
-	}
-
-	return &pb.GetAllP2PCollectionsReply{
-		Collections: pbCols,
-	}, txn.Commit(p.ctx)
-}
diff --git a/net/peer_collection.go b/net/peer_collection.go
new file mode 100644
index 0000000000..58f83f7aa8
--- /dev/null
+++ b/net/peer_collection.go
@@ -0,0 +1,179 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package net
+
+import (
+	"context"
+
+	dsq "github.com/ipfs/go-datastore/query"
+
+	"github.com/sourcenetwork/defradb/client"
+	"github.com/sourcenetwork/defradb/core"
+)
+
+const marker = byte(0xff)
+
+func (p *Peer) AddP2PCollections(ctx context.Context, collectionIDs []string) error {
+	txn, err := p.db.NewTxn(p.ctx, false)
+	if err != nil {
+		return err
+	}
+	defer txn.Discard(p.ctx)
+
+	// first let's make sure the collections actually exist
+	storeCollections := []client.Collection{}
+	for _, col := range collectionIDs {
+		storeCol, err := p.db.WithTxn(txn).GetCollectionsBySchemaRoot(p.ctx, col)
+		if err != nil {
+			return err
+		}
+		if len(storeCol) == 0 {
+			return client.NewErrCollectionNotFoundForSchema(col)
+		}
+		storeCollections = append(storeCollections, storeCol...)
+	}
+
+	// Ensure we can add all the collections to the store on the transaction
+	// before adding to topics.
+	for _, col := range storeCollections {
+		key := core.NewP2PCollectionKey(col.SchemaRoot())
+		err = txn.Systemstore().Put(ctx, key.ToDS(), []byte{marker})
+		if err != nil {
+			return err
+		}
+	}
+
+	// Add pubsub topics and remove them if we get an error.
+	addedTopics := []string{}
+	for _, col := range collectionIDs {
+		err = p.server.addPubSubTopic(col, true)
+		if err != nil {
+			return p.rollbackAddPubSubTopics(addedTopics, err)
+		}
+		addedTopics = append(addedTopics, col)
+	}
+
+	// After adding the collection topics, we remove the collections' documents
+	// from the pubsub topics to avoid receiving duplicate events.
+	removedTopics := []string{}
+	for _, col := range storeCollections {
+		keyChan, err := col.GetAllDocKeys(p.ctx)
+		if err != nil {
+			return err
+		}
+		for key := range keyChan {
+			err := p.server.removePubSubTopic(key.Key.String())
+			if err != nil {
+				return p.rollbackRemovePubSubTopics(removedTopics, err)
+			}
+			removedTopics = append(removedTopics, key.Key.String())
+		}
+	}
+
+	if err = txn.Commit(p.ctx); err != nil {
+		err = p.rollbackRemovePubSubTopics(removedTopics, err)
+		return p.rollbackAddPubSubTopics(addedTopics, err)
+	}
+
+	return nil
+}
+
+func (p *Peer) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error {
+	txn, err := p.db.NewTxn(p.ctx, false)
+	if err != nil {
+		return err
+	}
+	defer txn.Discard(p.ctx)
+
+	// first let's make sure the collections actually exist
+	storeCollections := []client.Collection{}
+	for _, col := range collectionIDs {
+		storeCol, err := p.db.WithTxn(txn).GetCollectionsBySchemaRoot(p.ctx, col)
+		if err != nil {
+			return err
+		}
+		if len(storeCol) == 0 {
+			return client.NewErrCollectionNotFoundForSchema(col)
+		}
+		storeCollections = append(storeCollections, storeCol...)
+	}
+
+	// Ensure we can remove all the collections from the store on the transaction
+	// before removing the topics.
+	for _, col := range storeCollections {
+		key := core.NewP2PCollectionKey(col.SchemaRoot())
+		err = txn.Systemstore().Delete(ctx, key.ToDS())
+		if err != nil {
+			return err
+		}
+	}
+
+	// Remove pubsub topics and add them back if we get an error.
+	removedTopics := []string{}
+	for _, col := range collectionIDs {
+		err = p.server.removePubSubTopic(col)
+		if err != nil {
+			return p.rollbackRemovePubSubTopics(removedTopics, err)
+		}
+		removedTopics = append(removedTopics, col)
+	}
+
+	// After removing the collection topics, we add back the collections' documents
+	// to the pubsub topics.
+ addedTopics := []string{} + for _, col := range storeCollections { + keyChan, err := col.GetAllDocKeys(p.ctx) + if err != nil { + return err + } + for key := range keyChan { + err := p.server.addPubSubTopic(key.Key.String(), true) + if err != nil { + return p.rollbackAddPubSubTopics(addedTopics, err) + } + addedTopics = append(addedTopics, key.Key.String()) + } + } + + if err = txn.Commit(p.ctx); err != nil { + err = p.rollbackAddPubSubTopics(addedTopics, err) + return p.rollbackRemovePubSubTopics(removedTopics, err) + } + + return nil +} + +func (p *Peer) GetAllP2PCollections(ctx context.Context) ([]string, error) { + txn, err := p.db.NewTxn(p.ctx, true) + if err != nil { + return nil, err + } + defer txn.Discard(p.ctx) + + query := dsq.Query{ + Prefix: core.NewP2PCollectionKey("").ToString(), + } + results, err := txn.Systemstore().Query(ctx, query) + if err != nil { + return nil, err + } + + collectionIDs := []string{} + for result := range results.Next() { + key, err := core.NewP2PCollectionKeyFromString(result.Key) + if err != nil { + return nil, err + } + collectionIDs = append(collectionIDs, key.CollectionID) + } + + return collectionIDs, nil +} diff --git a/net/peer_replicator.go b/net/peer_replicator.go new file mode 100644 index 0000000000..c444dee58f --- /dev/null +++ b/net/peer_replicator.go @@ -0,0 +1,207 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package net + +import ( + "context" + "encoding/json" + + dsq "github.com/ipfs/go-datastore/query" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/libp2p/go-libp2p/core/peerstore" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/core" +) + +func (p *Peer) SetReplicator(ctx context.Context, rep client.Replicator) error { + p.mu.Lock() + defer p.mu.Unlock() + + txn, err := p.db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + if rep.Info.ID == p.host.ID() { + return ErrSelfTargetForReplicator + } + if err := rep.Info.ID.Validate(); err != nil { + return err + } + + var collections []client.Collection + switch { + case len(rep.Schemas) > 0: + // if specific collections are chosen get them by name + for _, name := range rep.Schemas { + col, err := p.db.WithTxn(txn).GetCollectionByName(ctx, name) + if err != nil { + return NewErrReplicatorCollections(err) + } + collections = append(collections, col) + } + + default: + // default to all collections + collections, err = p.db.WithTxn(txn).GetAllCollections(ctx) + if err != nil { + return NewErrReplicatorCollections(err) + } + } + rep.Schemas = nil + + // Add the destination's peer multiaddress in the peerstore. + // This will be used during connection and stream creation by libp2p. + p.host.Peerstore().AddAddrs(rep.Info.ID, rep.Info.Addrs, peerstore.PermanentAddrTTL) + + var added []client.Collection + for _, col := range collections { + reps, exists := p.replicators[col.SchemaRoot()] + if !exists { + p.replicators[col.SchemaRoot()] = make(map[peer.ID]struct{}) + } + if _, exists := reps[rep.Info.ID]; !exists { + // keep track of newly added collections so we don't + // push logs to a replicator peer multiple times. 
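`GetAllP2PCollections` above recovers the stored schema roots purely from key names: the value is just the `0xff` marker byte, and membership is expressed by key presence under a shared prefix. A self-contained round-trip sketch using go-datastore, with a made-up `/p2pcol` prefix standing in for `core.NewP2PCollectionKey`:

```go
package main

import (
	"context"
	"fmt"
	"strings"

	ds "github.com/ipfs/go-datastore"
	dsq "github.com/ipfs/go-datastore/query"
)

const marker = byte(0xff)

func main() {
	ctx := context.Background()
	store := ds.NewMapDatastore() // in-memory stand-in for Systemstore()

	// Membership is key presence; the value is only a marker byte.
	for _, schemaRoot := range []string{"bafyA", "bafyB"} {
		key := ds.NewKey("/p2pcol/" + schemaRoot) // hypothetical prefix
		if err := store.Put(ctx, key, []byte{marker}); err != nil {
			panic(err)
		}
	}

	// List everything back with a prefix query, as GetAllP2PCollections does.
	results, err := store.Query(ctx, dsq.Query{Prefix: "/p2pcol"})
	if err != nil {
		panic(err)
	}
	for r := range results.Next() {
		if r.Error != nil {
			panic(r.Error)
		}
		fmt.Println(strings.TrimPrefix(r.Key, "/p2pcol/"))
	}
}
```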
+ p.replicators[col.SchemaRoot()][rep.Info.ID] = struct{}{} + added = append(added, col) + } + rep.Schemas = append(rep.Schemas, col.SchemaRoot()) + } + + // persist replicator to the datastore + repBytes, err := json.Marshal(rep) + if err != nil { + return err + } + key := core.NewReplicatorKey(rep.Info.ID.String()) + err = txn.Systemstore().Put(ctx, key.ToDS(), repBytes) + if err != nil { + return err + } + + // push all collection documents to the replicator peer + for _, col := range added { + keysCh, err := col.WithTxn(txn).GetAllDocKeys(ctx) + if err != nil { + return NewErrReplicatorDocKey(err, col.Name(), rep.Info.ID) + } + p.pushToReplicator(ctx, txn, col, keysCh, rep.Info.ID) + } + + return txn.Commit(ctx) +} + +func (p *Peer) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + p.mu.Lock() + defer p.mu.Unlock() + + txn, err := p.db.NewTxn(ctx, false) + if err != nil { + return err + } + defer txn.Discard(ctx) + + if rep.Info.ID == p.host.ID() { + return ErrSelfTargetForReplicator + } + if err := rep.Info.ID.Validate(); err != nil { + return err + } + + var collections []client.Collection + switch { + case len(rep.Schemas) > 0: + // if specific collections are chosen get them by name + for _, name := range rep.Schemas { + col, err := p.db.WithTxn(txn).GetCollectionByName(ctx, name) + if err != nil { + return NewErrReplicatorCollections(err) + } + collections = append(collections, col) + } + // make sure the replicator exists in the datastore + key := core.NewReplicatorKey(rep.Info.ID.String()) + _, err = txn.Systemstore().Get(ctx, key.ToDS()) + if err != nil { + return err + } + + default: + // default to all collections + collections, err = p.db.WithTxn(txn).GetAllCollections(ctx) + if err != nil { + return NewErrReplicatorCollections(err) + } + } + rep.Schemas = nil + + schemaMap := make(map[string]struct{}) + for _, col := range collections { + schemaMap[col.SchemaRoot()] = struct{}{} + } + + // update replicators and add remaining schemas to rep + for key, val := range p.replicators { + if _, exists := val[rep.Info.ID]; exists { + if _, toDelete := schemaMap[key]; toDelete { + delete(p.replicators[key], rep.Info.ID) + } else { + rep.Schemas = append(rep.Schemas, key) + } + } + } + + if len(rep.Schemas) == 0 { + // Remove the destination's peer multiaddress in the peerstore. 
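`DeleteReplicator` removes only the requested schemas for the target peer and keeps replicating the rest; the peer's addresses are cleared and its stored record deleted only when nothing remains. The bookkeeping reduces to a small set difference, sketched here with illustrative names:

```go
package main

import "fmt"

// remainingSchemas is a hypothetical distillation of the logic above: given
// the schemas currently replicated to a peer and the set requested for
// deletion, it returns what the peer should keep replicating.
func remainingSchemas(current []string, toDelete map[string]struct{}) []string {
	var keep []string
	for _, schema := range current {
		if _, drop := toDelete[schema]; !drop {
			keep = append(keep, schema)
		}
	}
	return keep
}

func main() {
	keep := remainingSchemas(
		[]string{"User", "Book"},
		map[string]struct{}{"User": {}},
	)
	// Only when keep is empty would the peer's addresses be cleared and its
	// replicator record deleted from the systemstore.
	fmt.Println(keep) // [Book]
}
```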
+ p.host.Peerstore().ClearAddrs(rep.Info.ID) + } + + // persist the replicator to the store, deleting it if no schemas remain + key := core.NewReplicatorKey(rep.Info.ID.String()) + if len(rep.Schemas) == 0 { + return txn.Systemstore().Delete(ctx, key.ToDS()) + } + repBytes, err := json.Marshal(rep) + if err != nil { + return err + } + return txn.Systemstore().Put(ctx, key.ToDS(), repBytes) +} + +func (p *Peer) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { + txn, err := p.db.NewTxn(ctx, true) + if err != nil { + return nil, err + } + defer txn.Discard(ctx) + + // create collection system prefix query + query := dsq.Query{ + Prefix: core.NewReplicatorKey("").ToString(), + } + results, err := txn.Systemstore().Query(ctx, query) + if err != nil { + return nil, err + } + + var reps []client.Replicator + for result := range results.Next() { + var rep client.Replicator + if err = json.Unmarshal(result.Value, &rep); err != nil { + return nil, err + } + reps = append(reps, rep) + } + return reps, nil +} diff --git a/net/peer_test.go b/net/peer_test.go index 092e908cd2..1ce11e238f 100644 --- a/net/peer_test.go +++ b/net/peer_test.go @@ -11,7 +11,6 @@ package net import ( - "bytes" "context" "testing" "time" @@ -22,11 +21,9 @@ import ( libp2p "github.com/libp2p/go-libp2p" pubsub "github.com/libp2p/go-libp2p-pubsub" "github.com/libp2p/go-libp2p/core/peer" - ma "github.com/multiformats/go-multiaddr" mh "github.com/multiformats/go-multihash" - "github.com/stretchr/testify/assert" + rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" "github.com/stretchr/testify/require" - rpc "github.com/textileio/go-libp2p-pubsub-rpc" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" @@ -35,8 +32,6 @@ import ( "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/events" - "github.com/sourcenetwork/defradb/logging" - pb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" ) @@ -117,7 +112,7 @@ func createCID(doc *client.Document) (cid.Cid, error) { return c, nil } -const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" +const randomMultiaddr = "/ip4/127.0.0.1/tcp/0" func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { store := memory.NewDatastore(ctx) @@ -126,14 +121,11 @@ func newTestNode(ctx context.Context, t *testing.T) (client.DB, *Node) { cfg := config.DefaultConfig() cfg.Net.P2PAddress = randomMultiaddr - cfg.Net.RPCAddress = "0.0.0.0:0" - cfg.Net.TCPAddress = randomMultiaddr n, err := NewNode( ctx, db, WithConfig(cfg), - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -149,7 +141,7 @@ func TestNewPeer_NoError(t *testing.T) { h, err := libp2p.New() require.NoError(t, err) - _, err = NewPeer(ctx, db, h, nil, nil, nil, nil, nil) + _, err = NewPeer(ctx, db, h, nil, nil, nil, nil) require.NoError(t, err) } @@ -159,7 +151,7 @@ func TestNewPeer_NoDB_NilDBError(t *testing.T) { h, err := libp2p.New() require.NoError(t, err) - _, err = NewPeer(ctx, nil, h, nil, nil, nil, nil, nil) + _, err = NewPeer(ctx, nil, h, nil, nil, nil, nil) require.ErrorIs(t, err, ErrNilDB) } @@ -198,7 +190,7 @@ func TestNewPeer_WithExistingTopic_TopicAlreadyExistsError(t *testing.T) { _, err = rpc.NewTopic(ctx, ps, h.ID(), doc.Key().String(), true) require.NoError(t, err) - _, err = NewPeer(ctx, db, h, nil, ps, nil, nil, nil) + _, err = NewPeer(ctx, db, h, nil, ps, nil, nil) require.ErrorContains(t, err, "topic already exists") } @@ -209,7 +201,7 @@ func 
TestStartAndClose_NoError(t *testing.T) { err := n.Start() require.NoError(t, err) - db.Close(ctx) + db.Close() } func TestStart_WithKnownPeer_NoError(t *testing.T) { @@ -226,16 +218,12 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { ctx, db1, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) n2, err := NewNode( ctx, db2, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -243,13 +231,13 @@ func TestStart_WithKnownPeer_NoError(t *testing.T) { if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) + n2.Bootstrap(addrs) err = n2.Start() require.NoError(t, err) - db1.Close(ctx) - db2.Close(ctx) + db1.Close() + db2.Close() } func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { @@ -266,16 +254,12 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { ctx, db1, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) n2, err := NewNode( ctx, db2, WithListenP2PAddrStrings("/ip4/0.0.0.0/tcp/0"), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -283,16 +267,8 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { if err != nil { t.Fatal(err) } - n2.Boostrap(addrs) - - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - - err = n1.Close() - require.NoError(t, err) + n2.Bootstrap(addrs) + n1.Close() // give time for n1 to close time.Sleep(100 * time.Millisecond) @@ -300,21 +276,8 @@ func TestStart_WithOfflineKnownPeer_NoError(t *testing.T) { err = n2.Start() require.NoError(t, err) - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - assert.Equal(t, "Failure while reconnecting to a known peer", logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") - - db1.Close(ctx) - db2.Close(ctx) + db1.Close() + db2.Close() } func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { @@ -327,15 +290,13 @@ func TestStart_WithNoUpdateChannel_NilUpdateChannelError(t *testing.T) { ctx, db, WithPubSub(true), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) err = n.Start() require.ErrorIs(t, err, ErrNilUpdateChannel) - db.Close(ctx) + db.Close() } func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { @@ -348,8 +309,6 @@ func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { ctx, db, WithPubSub(true), - // WithDataPath() is a required option with the current implementation of key management - WithDataPath(t.TempDir()), ) require.NoError(t, err) @@ -358,7 +317,7 @@ func TestStart_WitClosedUpdateChannel_ClosedChannelError(t *testing.T) { err = n.Start() require.ErrorContains(t, err, "cannot subscribe to a closed channel") - db.Close(ctx) + db.Close() } func TestRegisterNewDocument_NoError(t *testing.T) { @@ -380,7 +339,7 @@ func TestRegisterNewDocument_NoError(t *testing.T) { cid, err := createCID(doc) require.NoError(t, err) - err = 
n.RegisterNewDocument(ctx, doc.Key(), cid, &EmptyNode{}, col.SchemaID()) + err = n.RegisterNewDocument(ctx, doc.Key(), cid, &EmptyNode{}, col.SchemaRoot()) require.NoError(t, err) } @@ -406,7 +365,7 @@ func TestRegisterNewDocument_RPCTopicAlreadyRegisteredError(t *testing.T) { cid, err := createCID(doc) require.NoError(t, err) - err = n.RegisterNewDocument(ctx, doc.Key(), cid, &EmptyNode{}, col.SchemaID()) + err = n.RegisterNewDocument(ctx, doc.Key(), cid, &EmptyNode{}, col.SchemaRoot()) require.Equal(t, err.Error(), "creating topic: joining topic: topic already exists") } @@ -420,20 +379,17 @@ func TestSetReplicator_NoError(t *testing.T) { }`) require.NoError(t, err) - addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - Collections: []string{"User"}, - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: *info, + Schemas: []string{"User"}, + }) require.NoError(t, err) } -func TestSetReplicator_WithInvalidAddress_InvalidArgumentError(t *testing.T) { +func TestSetReplicator_WithInvalidAddress_EmptyPeerIDError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) @@ -443,32 +399,26 @@ func TestSetReplicator_WithInvalidAddress_InvalidArgumentError(t *testing.T) { }`) require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: []byte("/some/invalid/address"), - Collections: []string{"User"}, - }, - ) - require.ErrorContains(t, err, "InvalidArgument") + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: peer.AddrInfo{}, + Schemas: []string{"User"}, + }) + require.ErrorContains(t, err, "empty peer ID") } func TestSetReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + db.Close() - addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - Collections: []string{"User"}, - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: *info, + Schemas: []string{"User"}, + }) require.ErrorContains(t, err, "datastore closed") } @@ -476,17 +426,14 @@ func TestSetReplicator_WithUndefinedCollection_KeyNotFoundError(t *testing.T) { ctx := context.Background() _, n := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - Collections: []string{"User"}, - }, - ) - require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: *info, + Schemas: []string{"User"}, + }) + require.ErrorContains(t, err, "failed to get collections for replicator: datastore: key not found") } func TestSetReplicator_ForAllCollections_NoError(t *testing.T) { @@ -499,15 +446,12 @@ func TestSetReplicator_ForAllCollections_NoError(t 
*testing.T) { }`) require.NoError(t, err) - addr, err := ma.NewMultiaddr("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") + info, err := peer.AddrInfoFromString("/ip4/0.0.0.0/tcp/0/p2p/QmYyQSo1c1Ym7orWxLYvCrM2EmxFTANf8wXmmE7DWjhx5N") require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: *info, + }) require.NoError(t, err) } @@ -535,41 +479,24 @@ func TestPushToReplicator_SingleDocumentNoPeer_FailedToReplicateLogError(t *test txn, err := db.NewTxn(ctx, true) require.NoError(t, err) - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - n.pushToReplicator(ctx, txn, col, keysCh, n.PeerID()) - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 1 { - t.Fatalf("expecting exactly 1 log line but got %d lines", len(logLines)) - } - assert.Equal(t, "Failed to replicate log", logLines[0]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") } func TestDeleteReplicator_WithDBClosed_DataStoreClosedError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + info := peer.AddrInfo{ + ID: n.PeerID(), + Addrs: n.ListenAddrs(), + } - _, err := n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n.PeerID()), - Collections: []string{"User"}, - }, - ) + db.Close() + + err := n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: info, + Schemas: []string{"User"}, + }) require.ErrorContains(t, err, "datastore closed") } @@ -577,13 +504,10 @@ func TestDeleteReplicator_WithTargetSelf_SelfTargetForReplicatorError(t *testing ctx := context.Background() _, n := newTestNode(ctx, t) - _, err := n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n.PeerID()), - Collections: []string{"User"}, - }, - ) + err := n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n.PeerInfo(), + Schemas: []string{"User"}, + }) require.ErrorIs(t, err, ErrSelfTargetForReplicator) } @@ -593,14 +517,11 @@ func TestDeleteReplicator_WithInvalidCollection_KeyNotFoundError(t *testing.T) { _, n2 := newTestNode(ctx, t) - _, err := n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n2.PeerID()), - Collections: []string{"User"}, - }, - ) - require.ErrorContains(t, err, "failed to get collection for replicator: datastore: key not found") + err := n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + Schemas: []string{"User"}, + }) + require.ErrorContains(t, err, "failed to get collections for replicator: datastore: key not found") } func TestDeleteReplicator_WithCollectionAndPreviouslySetReplicator_NoError(t *testing.T) { @@ -615,23 +536,14 @@ func TestDeleteReplicator_WithCollectionAndPreviouslySetReplicator_NoError(t *te _, n2 := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) - require.NoError(t, err) - - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) - _, err = n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n2.PeerID()), - }, - ) + err = n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) } @@ -641,12 +553,9 @@ func TestDeleteReplicator_WithNoCollection_NoError(t *testing.T) { 
_, n2 := newTestNode(ctx, t) - _, err := n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n2.PeerID()), - }, - ) + err := n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) } @@ -662,13 +571,10 @@ func TestDeleteReplicator_WithNotSetReplicator_KeyNotFoundError(t *testing.T) { _, n2 := newTestNode(ctx, t) - _, err = n.Peer.DeleteReplicator( - ctx, - &pb.DeleteReplicatorRequest{ - PeerID: []byte(n2.PeerID()), - Collections: []string{"User"}, - }, - ) + err = n.Peer.DeleteReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + Schemas: []string{"User"}, + }) require.ErrorContains(t, err, "datastore: key not found") } @@ -684,42 +590,25 @@ func TestGetAllReplicator_WithReplicator_NoError(t *testing.T) { _, n2 := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) - require.NoError(t, err) - - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) - require.NoError(t, err) - - reps, err := n.Peer.GetAllReplicators( - ctx, - &pb.GetAllReplicatorRequest{}, - ) - require.NoError(t, err) - - info, err := peer.AddrInfoFromP2pAddr(addr) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) - id, err := info.ID.MarshalBinary() + reps, err := n.Peer.GetAllReplicators(ctx) require.NoError(t, err) - require.Equal(t, id, reps.Replicators[0].Info.Id) + require.Len(t, reps, 1) + require.Equal(t, n2.PeerInfo().ID, reps[0].Info.ID) } func TestGetAllReplicator_WithDBClosed_DatastoreClosedError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + db.Close() - _, err := n.Peer.GetAllReplicators( - ctx, - &pb.GetAllReplicatorRequest{}, - ) + _, err := n.Peer.GetAllReplicators(ctx) require.ErrorContains(t, err, "datastore closed") } @@ -727,7 +616,7 @@ func TestLoadReplicators_WithDBClosed_DatastoreClosedError(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + db.Close() err := n.Peer.loadReplicators(ctx) require.ErrorContains(t, err, "datastore closed") @@ -745,15 +634,9 @@ func TestLoadReplicator_WithReplicator_NoError(t *testing.T) { _, n2 := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) - require.NoError(t, err) - - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) err = n.Peer.loadReplicators(ctx) @@ -772,15 +655,9 @@ func TestLoadReplicator_WithReplicatorAndEmptyReplicatorMap_NoError(t *testing.T _, n2 := newTestNode(ctx, t) - addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String()) - require.NoError(t, err) - - _, err = n.Peer.SetReplicator( - ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err = n.Peer.SetReplicator(ctx, client.Replicator{ + Info: n2.PeerInfo(), + }) require.NoError(t, err) n.replicators = make(map[string]map[peer.ID]struct{}) @@ -793,12 +670,7 @@ func TestAddP2PCollections_WithInvalidCollectionID_NotFoundError(t *testing.T) { ctx := context.Background() _, n := newTestNode(ctx, t) - _, err := n.Peer.AddP2PCollections( - ctx, - &pb.AddP2PCollectionsRequest{ - Collections: []string{"invalid_collection"}, - }, - ) + err := n.Peer.AddP2PCollections(ctx, []string{"invalid_collection"}) require.Error(t, err, 
ds.ErrNotFound) } @@ -815,12 +687,7 @@ func TestAddP2PCollections_NoError(t *testing.T) { col, err := db.GetCollectionByName(ctx, "User") require.NoError(t, err) - _, err = n.Peer.AddP2PCollections( - ctx, - &pb.AddP2PCollectionsRequest{ - Collections: []string{col.SchemaID()}, - }, - ) + err = n.Peer.AddP2PCollections(ctx, []string{col.SchemaRoot()}) require.NoError(t, err) } @@ -828,12 +695,7 @@ func TestRemoveP2PCollectionsWithInvalidCollectionID(t *testing.T) { ctx := context.Background() _, n := newTestNode(ctx, t) - _, err := n.Peer.RemoveP2PCollections( - ctx, - &pb.RemoveP2PCollectionsRequest{ - Collections: []string{"invalid_collection"}, - }, - ) + err := n.Peer.RemoveP2PCollections(ctx, []string{"invalid_collection"}) require.Error(t, err, ds.ErrNotFound) } @@ -850,12 +712,7 @@ func TestRemoveP2PCollections(t *testing.T) { col, err := db.GetCollectionByName(ctx, "User") require.NoError(t, err) - _, err = n.Peer.RemoveP2PCollections( - ctx, - &pb.RemoveP2PCollectionsRequest{ - Collections: []string{col.SchemaID()}, - }, - ) + err = n.Peer.RemoveP2PCollections(ctx, []string{col.SchemaRoot()}) require.NoError(t, err) } @@ -863,12 +720,9 @@ func TestGetAllP2PCollectionsWithNoCollections(t *testing.T) { ctx := context.Background() _, n := newTestNode(ctx, t) - cols, err := n.Peer.GetAllP2PCollections( - ctx, - &pb.GetAllP2PCollectionsRequest{}, - ) + cols, err := n.Peer.GetAllP2PCollections(ctx) require.NoError(t, err) - require.Len(t, cols.Collections, 0) + require.Len(t, cols, 0) } func TestGetAllP2PCollections(t *testing.T) { @@ -884,25 +738,12 @@ func TestGetAllP2PCollections(t *testing.T) { col, err := db.GetCollectionByName(ctx, "User") require.NoError(t, err) - _, err = n.Peer.AddP2PCollections( - ctx, - &pb.AddP2PCollectionsRequest{ - Collections: []string{col.SchemaID()}, - }, - ) + err = n.Peer.AddP2PCollections(ctx, []string{col.SchemaRoot()}) require.NoError(t, err) - cols, err := n.Peer.GetAllP2PCollections( - ctx, - &pb.GetAllP2PCollectionsRequest{}, - ) + cols, err := n.Peer.GetAllP2PCollections(ctx) require.NoError(t, err) - require.Equal(t, &pb.GetAllP2PCollectionsReply{ - Collections: []*pb.GetAllP2PCollectionsReply_Collection{{ - Id: col.SchemaID(), - Name: col.Name(), - }}, - }, cols) + require.ElementsMatch(t, []string{col.SchemaRoot()}, cols) } func TestHandleDocCreateLog_NoError(t *testing.T) { @@ -937,11 +778,11 @@ func TestHandleDocCreateLog_NoError(t *testing.T) { require.NoError(t, err) err = n.handleDocCreateLog(events.Update{ - DocKey: doc.Key().String(), - Cid: docCid, - SchemaID: col.SchemaID(), - Block: node, - Priority: 0, + DocKey: doc.Key().String(), + Cid: docCid, + SchemaRoot: col.SchemaRoot(), + Block: node, + Priority: 0, }) require.NoError(t, err) } @@ -979,8 +820,8 @@ func TestHandleDocCreateLog_WithExistingTopic_TopicExistsError(t *testing.T) { require.NoError(t, err) err = n.handleDocCreateLog(events.Update{ - DocKey: doc.Key().String(), - SchemaID: col.SchemaID(), + DocKey: doc.Key().String(), + SchemaRoot: col.SchemaRoot(), }) require.ErrorContains(t, err, "topic already exists") } @@ -1017,11 +858,11 @@ func TestHandleDocUpdateLog_NoError(t *testing.T) { require.NoError(t, err) err = n.handleDocUpdateLog(events.Update{ - DocKey: doc.Key().String(), - Cid: docCid, - SchemaID: col.SchemaID(), - Block: node, - Priority: 0, + DocKey: doc.Key().String(), + Cid: docCid, + SchemaRoot: col.SchemaRoot(), + Block: node, + Priority: 0, }) require.NoError(t, err) } @@ -1071,10 +912,10 @@ func 
TestHandleDocUpdateLog_WithExistingDockeyTopic_TopicExistsError(t *testing.
 	require.NoError(t, err)
 
 	err = n.handleDocUpdateLog(events.Update{
-		DocKey:   doc.Key().String(),
-		Cid:      docCid,
-		SchemaID: col.SchemaID(),
-		Block:    node,
+		DocKey:     doc.Key().String(),
+		Cid:        docCid,
+		SchemaRoot: col.SchemaRoot(),
+		Block:      node,
 	})
 	require.ErrorContains(t, err, "topic already exists")
 }
@@ -1110,14 +951,14 @@ func TestHandleDocUpdateLog_WithExistingSchemaTopic_TopicExistsError(t *testing.
 	node, err := makeNode(delta, []cid.Cid{docCid})
 	require.NoError(t, err)
 
-	_, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), col.SchemaID(), true)
+	_, err = rpc.NewTopic(ctx, n.ps, n.host.ID(), col.SchemaRoot(), true)
 	require.NoError(t, err)
 
 	err = n.handleDocUpdateLog(events.Update{
-		DocKey:   doc.Key().String(),
-		Cid:      docCid,
-		SchemaID: col.SchemaID(),
-		Block:    node,
+		DocKey:     doc.Key().String(),
+		Cid:        docCid,
+		SchemaRoot: col.SchemaRoot(),
+		Block:      node,
 	})
 	require.ErrorContains(t, err, "topic already exists")
 }
@@ -1134,15 +975,9 @@ func TestPushLogToReplicator_WithReplicator_FailedPushingLogError(t *testing.T)
 
 	_, n2 := newTestNode(ctx, t)
 
-	addr, err := ma.NewMultiaddr(n2.host.Addrs()[0].String() + "/p2p/" + n2.PeerID().String())
-	require.NoError(t, err)
-
-	_, err = n.Peer.SetReplicator(
-		ctx,
-		&pb.SetReplicatorRequest{
-			Addr: addr.Bytes(),
-		},
-	)
+	err = n.Peer.SetReplicator(ctx, client.Replicator{
+		Info: n2.PeerInfo(),
+	})
 	require.NoError(t, err)
 
 	col, err := db.GetCollectionByName(ctx, "User")
@@ -1167,10 +1002,10 @@ func TestPushLogToReplicator_WithReplicator_FailedPushingLogError(t *testing.T)
 	require.NoError(t, err)
 
 	n.pushLogToReplicators(ctx, events.Update{
-		DocKey:   doc.Key().String(),
-		Cid:      docCid,
-		SchemaID: col.SchemaID(),
-		Block:    node,
+		DocKey:     doc.Key().String(),
+		Cid:        docCid,
+		SchemaRoot: col.SchemaRoot(),
+		Block:      node,
 	})
 }
diff --git a/net/process.go b/net/process.go
index a2fd446cfe..85748090ff 100644
--- a/net/process.go
+++ b/net/process.go
@@ -13,6 +13,7 @@ package net
 
 import (
+	"container/list"
 	"context"
 	"fmt"
 	"sync"
@@ -29,57 +30,95 @@ import (
 	"github.com/sourcenetwork/defradb/errors"
 	"github.com/sourcenetwork/defradb/events"
 	"github.com/sourcenetwork/defradb/logging"
-	"github.com/sourcenetwork/defradb/merkle/clock"
 	"github.com/sourcenetwork/defradb/merkle/crdt"
 )
 
-// processNode is a general utility for processing various kinds
-// of CRDT blocks
-func (p *Peer) processLog(
-	ctx context.Context,
+type blockProcessor struct {
+	*Peer
+	txn    datastore.Txn
+	col    client.Collection
+	dsKey  core.DataStoreKey
+	getter ipld.NodeGetter
+	// List of composite blocks to eventually merge
+	composites *list.List
+}
+
+func newBlockProcessor(
+	p *Peer,
 	txn datastore.Txn,
 	col client.Collection,
 	dsKey core.DataStoreKey,
-	field string,
-	nd ipld.Node,
 	getter ipld.NodeGetter,
-	removeChildren bool,
-) ([]cid.Cid, error) {
-	log.Debug(ctx, "Running processLog")
+) *blockProcessor {
+	return &blockProcessor{
+		Peer:       p,
+		composites: list.New(),
+		txn:        txn,
+		col:        col,
+		dsKey:      dsKey,
+		getter:     getter,
+	}
+}
-	crdt, err := initCRDTForType(ctx, txn, col, dsKey, field)
-	if err != nil {
-		return nil, err
+// mergeBlocks runs through the list of composite blocks and sends them for processing.
+func (bp *blockProcessor) mergeBlocks(ctx context.Context) { + for e := bp.composites.Front(); e != nil; e = e.Next() { + nd := e.Value.(ipld.Node) + err := bp.processBlock(ctx, nd, "") + if err != nil { + log.ErrorE( + ctx, + "Failed to process block", + err, + logging.NewKV("DocKey", bp.dsKey.DocKey), + logging.NewKV("CID", nd.Cid()), + ) + } } +} - delta, err := crdt.DeltaDecode(nd) +// processBlock merges the block and its children to the datastore and sets the head accordingly. +func (bp *blockProcessor) processBlock(ctx context.Context, nd ipld.Node, field string) error { + crdt, err := initCRDTForType(ctx, bp.txn, bp.col, bp.dsKey, field) if err != nil { - return nil, errors.Wrap("failed to decode delta object", err) + return err } - - log.Debug( - ctx, - "Processing PushLog request", - logging.NewKV("Datastore key", dsKey), - logging.NewKV("CID", nd.Cid()), - ) - - if err := txn.DAGstore().Put(ctx, nd); err != nil { - return nil, err + delta, err := crdt.DeltaDecode(nd) + if err != nil { + return errors.Wrap("failed to decode delta object", err) } - ng := p.createNodeGetter(crdt, getter) - cids, err := crdt.Clock().ProcessNode(ctx, ng, delta, nd) + err = crdt.Clock().ProcessNode(ctx, delta, nd) if err != nil { - return nil, err + return err } - if removeChildren { - // mark this obj as done - p.queuedChildren.Remove(nd.Cid()) + for _, link := range nd.Links() { + if link.Name == core.HEAD { + continue + } + + block, err := bp.txn.DAGstore().Get(ctx, link.Cid) + if err != nil { + return err + } + nd, err := dag.DecodeProtobufBlock(block) + if err != nil { + return err + } + + if err := bp.processBlock(ctx, nd, link.Name); err != nil { + log.ErrorE( + ctx, + "Failed to process block", + err, + logging.NewKV("DocKey", bp.dsKey.DocKey), + logging.NewKV("CID", nd.Cid()), + ) + } } - return cids, nil + return nil } func initCRDTForType( @@ -102,7 +141,7 @@ func initCRDTForType( core.COMPOSITE_NAMESPACE, ) } else { - fd, ok := description.Schema.GetField(field) + fd, ok := col.Schema().GetField(field) if !ok { return nil, errors.New(fmt.Sprintf("Couldn't find field %s for doc %s", field, dsKey)) } @@ -113,7 +152,7 @@ func initCRDTForType( log.Debug(ctx, "Got CRDT Type", logging.NewKV("CType", ctype), logging.NewKV("Field", field)) return crdt.DefaultFactory.InstanceWithStores( txn, - core.NewCollectionSchemaVersionKey(col.Schema().VersionID), + core.NewCollectionSchemaVersionKey(col.Schema().VersionID, col.ID()), events.EmptyUpdateChannel, ctype, key, @@ -129,88 +168,72 @@ func decodeBlockBuffer(buf []byte, cid cid.Cid) (ipld.Node, error) { return ipld.Decode(blk, dag.DecodeProtobufBlock) } -func (p *Peer) createNodeGetter( - crdt crdt.MerkleCRDT, - getter ipld.NodeGetter, -) *clock.CrdtNodeGetter { - return &clock.CrdtNodeGetter{ - NodeGetter: getter, - DeltaExtractor: crdt.DeltaDecode, +// processRemoteBlock stores the block in the DAG store and initiates a sync of the block's children. 
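The new `blockProcessor` splits DAG sync into two phases: `processRemoteBlock`/`handleChildBlocks` (whose bodies follow) persist blocks and queue their children for fetching, while `mergeBlocks` (above) merges the buffered composite blocks only once the whole sub-DAG is local. A stripped-down sketch of that fetch-then-merge shape, with a toy node type standing in for `ipld.Node`:

```go
package main

import (
	"container/list"
	"fmt"
	"sync"
)

// node is a toy stand-in for ipld.Node: an ID plus links to children.
type node struct {
	id       string
	children []*node
}

type processor struct {
	mu         sync.Mutex
	stored     []string
	composites *list.List // composite blocks buffered for a later merge pass
}

// fetch stores a block, buffers composites, and recurses into children,
// mirroring processRemoteBlock -> handleChildBlocks.
func (p *processor) fetch(session *sync.WaitGroup, nd *node, isComposite bool) {
	p.mu.Lock()
	p.stored = append(p.stored, nd.id)
	if isComposite {
		p.composites.PushFront(nd)
	}
	p.mu.Unlock()

	for _, child := range nd.children {
		session.Add(1)
		go func(c *node) {
			defer session.Done()
			// In DefraDB only HEAD links of a composite stay composite;
			// the toy simply treats all children as non-composite.
			p.fetch(session, c, false)
		}(child)
	}
}

func main() {
	root := &node{id: "head", children: []*node{{id: "c1"}, {id: "c2"}}}
	p := &processor{composites: list.New()}

	var session sync.WaitGroup
	p.fetch(&session, root, true)
	session.Wait() // wait until the whole sub-DAG has been fetched

	fmt.Println(len(p.stored), "blocks fetched")
	// The merge pass runs only after every block is local, as in mergeBlocks.
	for e := p.composites.Front(); e != nil; e = e.Next() {
		fmt.Println("merging", e.Value.(*node).id)
	}
}
```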
+func (bp *blockProcessor) processRemoteBlock( + ctx context.Context, + session *sync.WaitGroup, + nd ipld.Node, + isComposite bool, +) error { + log.Debug(ctx, "Running processLog") + + if err := bp.txn.DAGstore().Put(ctx, nd); err != nil { + return err + } + + if isComposite { + bp.composites.PushFront(nd) } + + bp.handleChildBlocks(ctx, session, nd, isComposite) + + return nil } -func (p *Peer) handleChildBlocks( +func (bp *blockProcessor) handleChildBlocks( + ctx context.Context, session *sync.WaitGroup, - txn datastore.Txn, - col client.Collection, - dsKey core.DataStoreKey, - field string, nd ipld.Node, - children []cid.Cid, - getter ipld.NodeGetter, + isComposite bool, ) { - if len(children) == 0 { + if len(nd.Links()) == 0 { return } - ctx, cancel := context.WithTimeout(p.ctx, DAGSyncTimeout) + ctx, cancel := context.WithTimeout(ctx, DAGSyncTimeout) defer cancel() - for _, c := range children { - if !p.queuedChildren.Visit(c) { // reserve for processing + for _, link := range nd.Links() { + if !bp.queuedChildren.Visit(link.Cid) { // reserve for processing continue } - var fieldName string - // loop over our children to get the corresponding field names from the DAG - for _, l := range nd.Links() { - if c == l.Cid { - if l.Name != core.HEAD { - fieldName = l.Name - } - } - } - - // heads of subfields are still subfields, not composites - if fieldName == "" && field != "" { - fieldName = field - } - - // get object - cNode, err := getter.Get(ctx, c) + exist, err := bp.txn.DAGstore().Has(ctx, link.Cid) if err != nil { - log.ErrorE(ctx, "Failed to get node", err, logging.NewKV("CID", c)) + log.Error( + ctx, + "Failed to check for existing block", + logging.NewKV("CID", link.Cid), + logging.NewKV("ERROR", err), + ) + } + if exist { + log.Debug(ctx, "Already have block locally, skipping.", logging.NewKV("CID", link.Cid)) continue } - log.Debug( - ctx, - "Submitting new job to DAG queue", - logging.NewKV("Collection", col.Name()), - logging.NewKV("Datastore key", dsKey), - logging.NewKV("Field", fieldName), - logging.NewKV("CID", cNode.Cid())) - session.Add(1) job := &dagJob{ - collection: col, - dsKey: dsKey, - fieldName: fieldName, - session: session, - nodeGetter: getter, - node: cNode, - txn: txn, + session: session, + cid: link.Cid, + isComposite: isComposite && link.Name == core.HEAD, + bp: bp, } select { - case p.sendJobs <- job: - case <-p.ctx.Done(): + case bp.sendJobs <- job: + case <-bp.ctx.Done(): return // jump out } } - - // Clear up any children we failed to get from queued children - // for _, child := range children { - // p.queuedChildren.Remove(child) - // } } diff --git a/net/server.go b/net/server.go index 7322d845ad..b770e3cf2b 100644 --- a/net/server.go +++ b/net/server.go @@ -21,7 +21,7 @@ import ( format "github.com/ipfs/go-ipld-format" "github.com/libp2p/go-libp2p/core/event" libpeer "github.com/libp2p/go-libp2p/core/peer" - rpc "github.com/textileio/go-libp2p-pubsub-rpc" + rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" grpcpeer "google.golang.org/grpc/peer" @@ -104,7 +104,7 @@ func newServer(p *Peer, db client.DB, opts ...grpc.DialOption) (*server, error) i := 0 for _, col := range cols { // If we subscribed to the collection, we skip subscribing to the collection's dockeys. 
- if _, ok := colMap[col.SchemaID()]; ok { + if _, ok := colMap[col.SchemaRoot()]; ok { continue } keyChan, err := col.GetAllDocKeys(p.ctx) @@ -246,8 +246,8 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL return &pb.PushLogReply{}, nil } - schemaID := string(req.Body.SchemaID) - docKey := core.DataStoreKeyFromDocKey(dockey) + schemaRoot := string(req.Body.SchemaRoot) + dsKey := core.DataStoreKeyFromDocKey(dockey) var txnErr error for retry := 0; retry < s.peer.db.MaxTxnRetries(); retry++ { @@ -260,10 +260,16 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL defer txn.Discard(ctx) store := s.db.WithTxn(txn) - col, err := store.GetCollectionBySchemaID(ctx, schemaID) + // Currently a schema is the best way we have to link a push log request to a collection, + // this will change with https://github.com/sourcenetwork/defradb/issues/1085 + cols, err := store.GetCollectionsBySchemaRoot(ctx, schemaRoot) if err != nil { - return nil, errors.Wrap(fmt.Sprintf("Failed to get collection from schemaID %s", schemaID), err) + return nil, errors.Wrap(fmt.Sprintf("Failed to get collection from schemaRoot %s", schemaRoot), err) } + if len(cols) == 0 { + return nil, client.NewErrCollectionNotFoundForSchema(schemaRoot) + } + col := cols[0] // Create a new DAG service with the current transaction var getter format.NodeGetter = s.peer.newDAGSyncerTxn(txn) @@ -278,33 +284,25 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL return nil, errors.Wrap("failed to decode block to ipld.Node", err) } - cids, err := s.peer.processLog(ctx, txn, col, docKey, "", nd, getter, false) + var session sync.WaitGroup + bp := newBlockProcessor(s.peer, txn, col, dsKey, getter) + err = bp.processRemoteBlock(ctx, &session, nd, true) if err != nil { log.ErrorE( ctx, - "Failed to process PushLog node", + "Failed to process remote block", err, - logging.NewKV("DocKey", docKey), + logging.NewKV("DocKey", dsKey.DocKey), logging.NewKV("CID", cid), ) } + session.Wait() + bp.mergeBlocks(ctx) - // handleChildren - if len(cids) > 0 { // we have child nodes to get - log.Debug( - ctx, - "Handling children for log", - logging.NewKV("NChildren", len(cids)), - logging.NewKV("CID", cid), - ) - var session sync.WaitGroup - s.peer.handleChildBlocks(&session, txn, col, docKey, "", nd, cids, getter) - session.Wait() - // dagWorkers specific to the dockey will have been spawned within handleChildBlocks. - // Once we are done with the dag syncing process, we can get rid of those workers. - s.peer.closeJob <- docKey.DocKey - } else { - log.Debug(ctx, "No more children to process for log", logging.NewKV("CID", cid)) + // dagWorkers specific to the dockey will have been spawned within handleChildBlocks. + // Once we are done with the dag syncing process, we can get rid of those workers. + if s.peer.closeJob != nil { + s.peer.closeJob <- dsKey.DocKey } if txnErr = txn.Commit(ctx); txnErr != nil { @@ -316,8 +314,8 @@ func (s *server) PushLog(ctx context.Context, req *pb.PushLogRequest) (*pb.PushL // Once processed, subscribe to the dockey topic on the pubsub network unless we already // suscribe to the collection. 
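`PushLog` above wraps the entire merge in a bounded retry loop: each attempt runs under a fresh transaction, up to `MaxTxnRetries`. A generic sketch of that loop shape; the conflict error here is an assumed stand-in for DefraDB's actual commit-conflict check, which this diff does not show:

```go
package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("transaction conflict")

// withTxnRetry re-runs fn on fresh state while it fails with a conflict,
// up to maxRetries attempts; any other error aborts immediately.
func withTxnRetry(maxRetries int, fn func(attempt int) error) error {
	var err error
	for retry := 0; retry < maxRetries; retry++ {
		err = fn(retry)
		if errors.Is(err, errConflict) {
			continue // another writer won; retry under a new transaction
		}
		return err
	}
	return err
}

func main() {
	err := withTxnRetry(3, func(attempt int) error {
		if attempt == 0 {
			return errConflict // first attempt loses a race
		}
		fmt.Println("committed on attempt", attempt)
		return nil
	})
	fmt.Println("result:", err)
}
```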
- if !s.hasPubSubTopic(col.SchemaID()) { - err = s.addPubSubTopic(docKey.DocKey, true) + if !s.hasPubSubTopic(col.SchemaRoot()) { + err = s.addPubSubTopic(dsKey.DocKey, true) if err != nil { return nil, err } @@ -499,7 +497,7 @@ type addr struct{ id libpeer.ID } func (a addr) Network() string { return "libp2p" } // String returns the peer ID of this address in string form (B58-encoded). -func (a addr) String() string { return a.id.Pretty() } +func (a addr) String() string { return a.id.String() } // peerIDFromContext returns peer ID from the GRPC context func peerIDFromContext(ctx context.Context) (libpeer.ID, error) { diff --git a/net/server_test.go b/net/server_test.go index 993c12d875..6b5c3a3e20 100644 --- a/net/server_test.go +++ b/net/server_test.go @@ -11,25 +11,19 @@ package net import ( - "bufio" - "bytes" "context" - "encoding/json" - "io" "testing" "time" "github.com/libp2p/go-libp2p/core/event" "github.com/libp2p/go-libp2p/core/host" - "github.com/stretchr/testify/assert" + rpc "github.com/sourcenetwork/go-libp2p-pubsub-rpc" "github.com/stretchr/testify/require" - rpc "github.com/textileio/go-libp2p-pubsub-rpc" grpcpeer "google.golang.org/grpc/peer" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore/memory" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/logging" net_pb "github.com/sourcenetwork/defradb/net/pb" ) @@ -43,7 +37,8 @@ func TestNewServerSimple(t *testing.T) { func TestNewServerWithDBClosed(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) - db.Close(ctx) + db.Close() + _, err := newServer(n.Peer, db) require.ErrorIs(t, err, memory.ErrClosed) } @@ -79,7 +74,7 @@ func TestNewServerWithCollectionSubscribed(t *testing.T) { col, err := db.GetCollectionByName(ctx, "User") require.NoError(t, err) - err = n.AddP2PCollection(ctx, col.SchemaID()) + err = n.AddP2PCollections(ctx, []string{col.SchemaRoot()}) require.NoError(t, err) _, err = newServer(n.Peer, db) @@ -100,7 +95,7 @@ type mockCollection struct { client.Collection } -func (mCol *mockCollection) SchemaID() string { +func (mCol *mockCollection) SchemaRoot() string { return "mockColID" } func (mCol *mockCollection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) { @@ -190,46 +185,8 @@ func TestNewServerWithEmitterError(t *testing.T) { n.Peer.host = &mockHost{n.Peer.host} - b := &bytes.Buffer{} - - log.ApplyConfig(logging.Config{ - Pipe: b, - }) - _, err = newServer(n.Peer, db) require.NoError(t, err) - - logLines, err := parseLines(b) - if err != nil { - t.Fatal(err) - } - - if len(logLines) != 2 { - t.Fatalf("expecting exactly 2 log line but got %d lines", len(logLines)) - } - assert.Equal(t, "could not create event emitter", logLines[0]["msg"]) - assert.Equal(t, "could not create event emitter", logLines[1]["msg"]) - - // reset logger - log = logging.MustNewLogger("defra.net") -} - -func parseLines(r io.Reader) ([]map[string]any, error) { - fileScanner := bufio.NewScanner(r) - - fileScanner.Split(bufio.ScanLines) - - logLines := []map[string]any{} - for fileScanner.Scan() { - loggedLine := make(map[string]any) - err := json.Unmarshal(fileScanner.Bytes(), &loggedLine) - if err != nil { - return nil, err - } - logLines = append(logLines, loggedLine) - } - - return logLines, nil } func TestGetDocGraph(t *testing.T) { @@ -291,8 +248,10 @@ func TestDocQueue(t *testing.T) { func TestPushLog(t *testing.T) { ctx := context.Background() db, n := newTestNode(ctx, t) + err := n.Start() + require.NoError(t, err) - _, 
err := db.AddSchema(ctx, `type User {
+	_, err = db.AddSchema(ctx, `type User {
 		name: String
 		age: Int
 	}`)
@@ -315,10 +274,10 @@ func TestPushLog(t *testing.T) {
 
 	_, err = n.server.PushLog(ctx, &net_pb.PushLogRequest{
 		Body: &net_pb.PushLogRequest_Body{
-			DocKey:   []byte(doc.Key().String()),
-			Cid:      cid.Bytes(),
-			SchemaID: []byte(col.SchemaID()),
-			Creator:  n.PeerID().String(),
+			DocKey:     []byte(doc.Key().String()),
+			Cid:        cid.Bytes(),
+			SchemaRoot: []byte(col.SchemaRoot()),
+			Creator:    n.PeerID().String(),
 			Log: &net_pb.Document_Log{
 				Block: block.RawData(),
 			},
diff --git a/planner/commit.go b/planner/commit.go
index e6216e2b43..b4fd3ed3c1 100644
--- a/planner/commit.go
+++ b/planner/commit.go
@@ -328,12 +328,17 @@ func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L
 		fieldName = nil
 
 	default:
-		c, err := n.planner.db.GetCollectionByVersionID(n.planner.ctx, schemaVersionId)
+		cols, err := n.planner.db.GetCollectionsByVersionID(n.planner.ctx, schemaVersionId)
 		if err != nil {
 			return core.Doc{}, nil, err
 		}
+		if len(cols) == 0 {
+			return core.Doc{}, nil, client.NewErrCollectionNotFoundForSchemaVersion(schemaVersionId)
+		}
 
-		field, ok := c.Description().Schema.GetField(fieldName.(string))
+		// Because we only care about the schema, we can safely take the first - the schema is the same
+		// for all in the set.
+		field, ok := cols[0].Schema().GetField(fieldName.(string))
 		if !ok {
 			return core.Doc{}, nil, client.NewErrFieldNotExist(fieldName.(string))
 		}
@@ -353,13 +358,19 @@ func (n *dagScanNode) dagBlockToNodeDoc(block blocks.Block) (core.Doc, []*ipld.L
 	n.commitSelect.DocumentMapping.SetFirstOfName(&commit,
 		request.DockeyFieldName, string(dockey))
 
-	collection, err := n.planner.db.GetCollectionByVersionID(n.planner.ctx, schemaVersionId)
+	cols, err := n.planner.db.GetCollectionsByVersionID(n.planner.ctx, schemaVersionId)
 	if err != nil {
 		return core.Doc{}, nil, err
 	}
+	if len(cols) == 0 {
+		return core.Doc{}, nil, client.NewErrCollectionNotFoundForSchemaVersion(schemaVersionId)
+	}
 
+	// WARNING: This will become incorrect once we allow multiple collections to share the same schema;
+	// we should by then instead fetch the collection by global collection ID:
+	// https://github.com/sourcenetwork/defradb/issues/1085
 	n.commitSelect.DocumentMapping.SetFirstOfName(&commit,
-		request.CollectionIDFieldName, int64(collection.ID()))
+		request.CollectionIDFieldName, int64(cols[0].ID()))
 
 	heads := make([]*ipld.Link, 0)
diff --git a/planner/datasource.go b/planner/datasource.go
index afcfbab3ce..72ac7579b4 100644
--- a/planner/datasource.go
+++ b/planner/datasource.go
@@ -11,23 +11,13 @@ package planner
 
 import (
-	"encoding/json"
-
 	"github.com/sourcenetwork/defradb/client"
-	"github.com/sourcenetwork/defradb/core"
-	"github.com/sourcenetwork/defradb/errors"
 	"github.com/sourcenetwork/defradb/planner/mapper"
 )
 
-// sourceInfo stores info about the data source
-type sourceInfo struct {
-	collectionDescription client.CollectionDescription
-	// and more
-}
-
 type planSource struct {
-	info sourceInfo
-	plan planNode
+	collection client.Collection
+	plan       planNode
 }
 
 func (p *Planner) getSource(parsed *mapper.Select) (planSource, error) {
@@ -35,44 +25,19 @@ func (p *Planner) getSource(parsed *mapper.Select) (planSource, error) {
 	return p.getCollectionScanPlan(parsed)
 }
 
-func (p *Planner) getCollectionScanPlan(parsed *mapper.Select) (planSource, error) {
-	colDesc, err := p.getCollectionDesc(parsed.CollectionName)
+func (p *Planner) getCollectionScanPlan(mapperSelect *mapper.Select) (planSource, 
error) { + col, err := p.db.GetCollectionByName(p.ctx, mapperSelect.CollectionName) if err != nil { return planSource{}, err } - scan, err := p.Scan(parsed) + scan, err := p.Scan(mapperSelect, col.Description()) if err != nil { return planSource{}, err } return planSource{ - plan: scan, - info: sourceInfo{ - collectionDescription: colDesc, - }, + plan: scan, + collection: col, }, nil } - -func (p *Planner) getCollectionDesc(name string) (client.CollectionDescription, error) { - collectionKey := core.NewCollectionKey(name) - var desc client.CollectionDescription - schemaVersionIdBytes, err := p.txn.Systemstore().Get(p.ctx, collectionKey.ToDS()) - if err != nil { - return desc, errors.Wrap("failed to get collection description", err) - } - - schemaVersionId := string(schemaVersionIdBytes) - schemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionId) - buf, err := p.txn.Systemstore().Get(p.ctx, schemaVersionKey.ToDS()) - if err != nil { - return desc, err - } - - err = json.Unmarshal(buf, &desc) - if err != nil { - return desc, err - } - - return desc, nil -} diff --git a/planner/doc.go b/planner/doc.go index 6036009d1b..f51e5162e7 100644 --- a/planner/doc.go +++ b/planner/doc.go @@ -34,8 +34,8 @@ The plan is executed as defined above, result by result, by iteratively calling return True or False, depending on if it successfully produced a record, which can be accessed via the Values() method. The plan starts with a base ast.Document, which represents the entire provided request string, parsed into an -appropriate AST Document. The AST Document is generated by the https://github.com/graphql-go/graphql package. It is -then further parsed using a native DefraDB GraphQL Parser +appropriate AST Document. The AST Document is generated by the https://github.com/sourcenetwork/graphql-go package. It +is then further parsed using a native DefraDB GraphQL Parser (https://github.com/sourcenetwork/defradb/request/graphql/parser), which converts the complex AST Document, into a manageable structure, with all the relevant request information readily available. diff --git a/planner/explain.go b/planner/explain.go index 560063b4ba..07f96f9b0a 100644 --- a/planner/explain.go +++ b/planner/explain.go @@ -286,10 +286,10 @@ func buildSimpleExplainGraph(source planNode) (map[string]any, error) { // // Note: Can only be called once the entire plan has been executed. 
func collectExecuteExplainInfo(executedPlan planNode) (map[string]any, error) {
-	excuteExplainInfo := map[string]any{}
+	executeExplainInfo := map[string]any{}
 
 	if executedPlan == nil {
-		return excuteExplainInfo, nil
+		return executeExplainInfo, nil
 	}
 
 	switch executedNode := executedPlan.(type) {
@@ -303,16 +303,16 @@ func collectExecuteExplainInfo(executedPlan planNode) (map[string]any, error) {
 			multiChildExplainGraph = append(multiChildExplainGraph, childExplainGraph)
 		}
 		explainNodeLabelTitle := strcase.ToLowerCamel(executedNode.Kind())
-		excuteExplainInfo[explainNodeLabelTitle] = multiChildExplainGraph
+		executeExplainInfo[explainNodeLabelTitle] = multiChildExplainGraph
 
 	case explainablePlanNode:
-		excuteExplainBuilder, err := executedNode.Explain(request.ExecuteExplain)
+		executeExplainBuilder, err := executedNode.Explain(request.ExecuteExplain)
 		if err != nil {
 			return nil, err
 		}
 
-		if excuteExplainBuilder == nil {
-			excuteExplainBuilder = map[string]any{}
+		if executeExplainBuilder == nil {
+			executeExplainBuilder = map[string]any{}
 		}
 
 		if next := executedNode.Source(); next != nil && next.Kind() != topLevelNodeKind {
@@ -321,21 +321,21 @@ func collectExecuteExplainInfo(executedPlan planNode) (map[string]any, error) {
 				return nil, err
 			}
 			for key, value := range nextExplainGraph {
-				excuteExplainBuilder[key] = value
+				executeExplainBuilder[key] = value
 			}
 		}
 		explainNodeLabelTitle := strcase.ToLowerCamel(executedNode.Kind())
-		excuteExplainInfo[explainNodeLabelTitle] = excuteExplainBuilder
+		executeExplainInfo[explainNodeLabelTitle] = executeExplainBuilder
 
 	default:
 		var err error
-		excuteExplainInfo, err = collectExecuteExplainInfo(executedPlan.Source())
+		executeExplainInfo, err = collectExecuteExplainInfo(executedPlan.Source())
 		if err != nil {
 			return nil, err
 		}
 	}
 
-	return excuteExplainInfo, nil
+	return executeExplainInfo, nil
 }
 
 // executeAndExplainRequest executes the plan graph gathering the information/datapoints
diff --git a/planner/filter/complex.go b/planner/filter/complex.go
index 098caefc9c..acc2de4883 100644
--- a/planner/filter/complex.go
+++ b/planner/filter/complex.go
@@ -17,7 +17,7 @@ import (
 
 // IsComplex returns true if the provided filter is complex.
 // A filter is considered complex if it contains a relation
-// object withing an _or operator not necessarily being
+// object within an _or or _not operator, not necessarily being
 // its direct child.
 func IsComplex(filter *mapper.Filter) bool {
 	if filter == nil {
diff --git a/planner/filter/copy_field.go b/planner/filter/copy_field.go
index 59f7db3471..70b5dc2956 100644
--- a/planner/filter/copy_field.go
+++ b/planner/filter/copy_field.go
@@ -14,18 +14,22 @@ import (
 	"github.com/sourcenetwork/defradb/planner/mapper"
 )
 
-// copyField copies the given field from the provided filter.
+// CopyField copies the given field from the provided filter.
+// Multiple fields can be passed to copy a field nested within related objects.
 // The result filter preserves the structure of the original filter.
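To make the new multi-field form of `CopyField` concrete: passing a path of fields extracts only the conditions that constrain that nested property, keeping any enclosing `_and`/`_or` structure. A usage sketch against the exported API, building conditions in the same shape the tests below use; the property indices are illustrative:

```go
package main

import (
	"fmt"

	"github.com/sourcenetwork/defradb/connor"
	"github.com/sourcenetwork/defradb/planner/filter"
	"github.com/sourcenetwork/defradb/planner/mapper"
)

func main() {
	// Illustrative mapper indices: 2 = published (a relation), 1 = its rating field.
	const publishedInd, ratingInd = 2, 1

	// {published: {rating: {_gt: 4.8}}, age: {_gt: 30}} in mapper form.
	src := mapper.NewFilter()
	src.Conditions = map[connor.FilterKey]any{
		&mapper.PropertyIndex{Index: publishedInd}: map[connor.FilterKey]any{
			&mapper.PropertyIndex{Index: ratingInd}: map[connor.FilterKey]any{
				&mapper.Operator{Operation: "_gt"}: 4.8,
			},
		},
		&mapper.PropertyIndex{Index: 0}: map[connor.FilterKey]any{ // age
			&mapper.Operator{Operation: "_gt"}: 30,
		},
	}

	// Keep only the conditions on published.rating.
	onlyRating := filter.CopyField(
		src,
		mapper.Field{Index: publishedInd},
		mapper.Field{Index: ratingInd},
	)
	fmt.Printf("%v\n", onlyRating.Conditions)
}
```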
-func copyField(filter *mapper.Filter, field mapper.Field) *mapper.Filter { - if filter == nil { +func CopyField(filter *mapper.Filter, fields ...mapper.Field) *mapper.Filter { + if filter == nil || len(fields) == 0 { return nil } - conditionKey := &mapper.PropertyIndex{ - Index: field.Index, + var conditionKeys []*mapper.PropertyIndex + for _, field := range fields { + conditionKeys = append(conditionKeys, &mapper.PropertyIndex{ + Index: field.Index, + }) } resultFilter := &mapper.Filter{} - conditionMap := traverseFilterByProperty(conditionKey, filter.Conditions, false) + conditionMap := traverseFilterByProperty(conditionKeys, filter.Conditions, false) if len(conditionMap) > 0 { resultFilter.Conditions = conditionMap return resultFilter @@ -34,7 +38,7 @@ func copyField(filter *mapper.Filter, field mapper.Field) *mapper.Filter { } func traverseFilterByProperty( - key *mapper.PropertyIndex, + keys []*mapper.PropertyIndex, conditions map[connor.FilterKey]any, shouldDelete bool, ) map[connor.FilterKey]any { @@ -43,11 +47,20 @@ func traverseFilterByProperty( result = make(map[connor.FilterKey]any) } for targetKey, clause := range conditions { - if targetKey.Equal(key) { - if shouldDelete { - delete(result, targetKey) + if targetKey.Equal(keys[0]) { + if len(keys) > 1 { + related := traverseFilterByProperty(keys[1:], clause.(map[connor.FilterKey]any), shouldDelete) + if shouldDelete && len(related) == 0 { + delete(result, targetKey) + } else if len(related) > 0 && !shouldDelete { + result[keys[0]] = clause + } } else { - result[key] = clause + if shouldDelete { + delete(result, targetKey) + } else { + result[keys[0]] = clause + } } } else if opKey, isOpKey := targetKey.(*mapper.Operator); isOpKey { clauseArr, isArr := clause.([]any) @@ -58,13 +71,15 @@ func traverseFilterByProperty( if !ok { continue } - compoundCond := traverseFilterByProperty(key, elementMap, shouldDelete) + compoundCond := traverseFilterByProperty(keys, elementMap, shouldDelete) if len(compoundCond) > 0 { resultArr = append(resultArr, compoundCond) } } if len(resultArr) > 0 { result[opKey] = resultArr + } else if shouldDelete { + delete(result, opKey) } } } diff --git a/planner/filter/copy_field_test.go b/planner/filter/copy_field_test.go index d3ec10cf62..1714db55b6 100644 --- a/planner/filter/copy_field_test.go +++ b/planner/filter/copy_field_test.go @@ -13,6 +13,7 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/connor" "github.com/sourcenetwork/defradb/planner/mapper" "github.com/stretchr/testify/assert" @@ -21,7 +22,7 @@ import ( func TestCopyField(t *testing.T) { tests := []struct { name string - inputField mapper.Field + inputField []mapper.Field inputFilter map[string]any expectedFilter map[string]any }{ @@ -31,7 +32,7 @@ func TestCopyField(t *testing.T) { "name": m("_eq", "John"), "age": m("_gt", 55), }, - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: m("age", m("_gt", 55)), }, { @@ -40,7 +41,7 @@ func TestCopyField(t *testing.T) { m("name", m("_eq", "John")), m("age", m("_gt", 55)), ), - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: r("_and", m("age", m("_gt", 55)), ), @@ -59,7 +60,7 @@ func TestCopyField(t *testing.T) { m("age", m("_lt", 55)), ), ), - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: r("_and", r("_or", r("_and", @@ -71,13 +72,48 @@ func 
TestCopyField(t *testing.T) { ), ), }, + { + name: "field of related object", + inputFilter: r("_and", + r("_or", + r("_and", + m("published", m("rating", m("_gt", 4.0))), + m("age", m("_gt", 30)), + ), + ), + m("published", m("genre", m("_eq", "Comedy"))), + m("name", m("_eq", "John")), + ), + inputField: []mapper.Field{{Index: authorPublishedInd}, {Index: bookRatingInd}}, + expectedFilter: r("_and", + r("_or", + r("_and", + m("published", m("rating", m("_gt", 4.0))), + ), + ), + ), + }, + { + name: "field of related object (deeper)", + inputFilter: r("_and", + m("published", m("rating", m("_gt", 4.0))), + m("age", m("_gt", 30)), + m("published", m("stores", m("address", m("_eq", "123 Main St")))), + m("published", m("genre", m("_eq", "Comedy"))), + m("name", m("_eq", "John")), + ), + inputField: []mapper.Field{{Index: authorPublishedInd}, {Index: bookStoresInd}, {Index: storeAddressInd}}, + expectedFilter: r("_and", + m("published", m("stores", m("address", m("_eq", "123 Main St")))), + ), + }, } mapping := getDocMapping() for _, test := range tests { t.Run(test.name, func(t *testing.T) { inputFilter := mapper.ToFilter(request.Filter{Conditions: test.inputFilter}, mapping) - actualFilter := copyField(inputFilter, test.inputField) + actualFilter := CopyField(inputFilter, test.inputField...) expectedFilter := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter}, mapping) AssertEqualFilterMap(t, expectedFilter.Conditions, actualFilter.Conditions) }) @@ -85,6 +121,15 @@ func TestCopyField(t *testing.T) { } func TestCopyFieldOfNullFilter(t *testing.T) { - actualFilter := copyField(nil, mapper.Field{Index: 1}) + actualFilter := CopyField(nil, mapper.Field{Index: 1}) + assert.Nil(t, actualFilter) +} + +func TestCopyFieldWithNoFieldGiven(t *testing.T) { + filter := mapper.NewFilter() + filter.Conditions = map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: 0}: &mapper.Operator{Operation: "_eq"}, + } + actualFilter := CopyField(filter) assert.Nil(t, actualFilter) } diff --git a/planner/filter/copy_test.go b/planner/filter/copy_test.go index ccb471c2b6..a45d368964 100644 --- a/planner/filter/copy_test.go +++ b/planner/filter/copy_test.go @@ -23,20 +23,20 @@ func TestCopyFilter(t *testing.T) { return map[connor.FilterKey]any{ &mapper.Operator{Operation: "_or"}: []any{ map[connor.FilterKey]any{ - &mapper.PropertyIndex{Index: 0}: map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: authorNameInd}: map[connor.FilterKey]any{ &mapper.Operator{Operation: "_eq"}: "Some name", }, }, map[connor.FilterKey]any{ &mapper.Operator{Operation: "_and"}: []any{ map[connor.FilterKey]any{ - &mapper.PropertyIndex{Index: 1}: map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: authorAgeInd}: map[connor.FilterKey]any{ &mapper.Operator{Operation: "_gt"}: 64, }, }, map[connor.FilterKey]any{ - &mapper.PropertyIndex{Index: 2}: map[connor.FilterKey]any{ - &mapper.PropertyIndex{Index: 1}: map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: authorPublishedInd}: map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: bookRatingInd}: map[connor.FilterKey]any{ &mapper.Operator{Operation: "_gt"}: 4.8, }, }, @@ -46,13 +46,13 @@ func TestCopyFilter(t *testing.T) { map[connor.FilterKey]any{ &mapper.Operator{Operation: "_and"}: []any{ map[connor.FilterKey]any{ - &mapper.PropertyIndex{Index: 1}: map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: authorAgeInd}: map[connor.FilterKey]any{ &mapper.Operator{Operation: "_lt"}: 64, }, }, map[connor.FilterKey]any{ - &mapper.PropertyIndex{Index: 2}: 
map[connor.FilterKey]any{ - &mapper.PropertyIndex{Index: 1}: map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: authorPublishedInd}: map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: bookRatingInd}: map[connor.FilterKey]any{ &mapper.Operator{Operation: "_lt"}: 4.8, }, }, diff --git a/planner/filter/extract_properties.go b/planner/filter/extract_properties.go new file mode 100644 index 0000000000..4c3e6bb0be --- /dev/null +++ b/planner/filter/extract_properties.go @@ -0,0 +1,78 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. +package filter + +import ( + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/connor" + "github.com/sourcenetwork/defradb/planner/mapper" +) + +// Property represents a single field that is being filtered on. +// It contains the index of the field in the core.DocumentMapping +// as well as an index -> Property map of the fields in case the field is an object. +type Property struct { + Index int + Fields map[int]Property +} + +func (p Property) IsRelation() bool { + return len(p.Fields) > 0 +} + +func mergeProps(p1, p2 Property) Property { + if p1.Index == 0 { + p1.Index = p2.Index + } + if p1.Fields == nil { + p1.Fields = p2.Fields + } else { + for k, v := range p2.Fields { + p1.Fields[k] = mergeProps(p1.Fields[k], v) + } + } + return p1 +} + +// ExtractProperties runs through the filter and returns an index -> Property map of the fields +// being filtered on. +func ExtractProperties(conditions map[connor.FilterKey]any) map[int]Property { + properties := map[int]Property{} + for k, v := range conditions { + switch typedKey := k.(type) { + case *mapper.PropertyIndex: + prop := properties[typedKey.Index] + prop.Index = typedKey.Index + relatedProps := ExtractProperties(v.(map[connor.FilterKey]any)) + properties[typedKey.Index] = mergeProps(prop, Property{Fields: relatedProps}) + case *mapper.Operator: + if typedKey.Operation == request.FilterOpAnd || typedKey.Operation == request.FilterOpOr { + compoundContent := v.([]any) + for _, compoundFilter := range compoundContent { + props := ExtractProperties(compoundFilter.(map[connor.FilterKey]any)) + for _, prop := range props { + existingProp := properties[prop.Index] + properties[prop.Index] = mergeProps(existingProp, prop) + } + } + } else if typedKey.Operation == request.FilterOpNot { + props := ExtractProperties(v.(map[connor.FilterKey]any)) + for _, prop := range props { + existingProp := properties[prop.Index] + properties[prop.Index] = mergeProps(existingProp, prop) + } + } + } + } + if len(properties) == 0 { + return nil + } + return properties +} diff --git a/planner/filter/extract_properties_test.go b/planner/filter/extract_properties_test.go new file mode 100644 index 0000000000..c90dbe85b0 --- /dev/null +++ b/planner/filter/extract_properties_test.go @@ -0,0 +1,115 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt.
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. +package filter + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/planner/mapper" + + "github.com/stretchr/testify/assert" +) + +func TestExtractProperties(t *testing.T) { + tests := []struct { + name string + inputFilter map[string]any + expectedFilter map[int]Property + }{ + { + name: "no nesting", + inputFilter: map[string]any{ + "name": m("_eq", "John"), + "age": m("_gt", 55), + }, + expectedFilter: map[int]Property{ + authorNameInd: {Index: authorNameInd}, + authorAgeInd: {Index: authorAgeInd}, + }, + }, + { + name: "within _and, _or and _not", + inputFilter: r("_or", + m("name", m("_eq", "John")), + r("_and", + m("age", m("_gt", 55)), + m("_not", + r("_or", + m("verified", m("_eq", true)), + ), + ), + ), + ), + expectedFilter: map[int]Property{ + authorNameInd: {Index: authorNameInd}, + authorAgeInd: {Index: authorAgeInd}, + authorVerifiedInd: {Index: authorVerifiedInd}, + }, + }, + { + name: "related field", + inputFilter: r("_or", + m("name", m("_eq", "John")), + m("published", m("genre", m("_eq", "Comedy"))), + ), + expectedFilter: map[int]Property{ + authorNameInd: {Index: authorNameInd}, + authorPublishedInd: { + Index: authorPublishedInd, + Fields: map[int]Property{bookGenreInd: {Index: bookGenreInd}}, + }, + }, + }, + { + name: "several related field with deeper nesting", + inputFilter: r("_or", + m("name", m("_eq", "John")), + m("published", m("genre", m("_eq", "Comedy"))), + m("published", m("rating", m("_gt", 55))), + m("published", m("stores", m("name", m("_eq", "Amazon")))), + m("published", m("stores", m("address", m("_gt", "5th Avenue")))), + ), + expectedFilter: map[int]Property{ + authorNameInd: {Index: authorNameInd}, + authorPublishedInd: { + Index: authorPublishedInd, + Fields: map[int]Property{ + bookGenreInd: {Index: bookGenreInd}, + bookRatingInd: {Index: bookRatingInd}, + bookStoresInd: { + Index: bookStoresInd, + Fields: map[int]Property{ + storeNameInd: {Index: storeNameInd}, + storeAddressInd: {Index: storeAddressInd}, + }, + }, + }, + }, + }, + }, + } + + mapping := getDocMapping() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + inputFilter := mapper.ToFilter(request.Filter{Conditions: test.inputFilter}, mapping) + actualFilter := ExtractProperties(inputFilter.Conditions) + assert.Equal(t, test.expectedFilter, actualFilter) + }) + } +} + +func TestExtractPropertiesOfNullFilter(t *testing.T) { + actualProps := ExtractProperties(nil) + assert.Nil(t, actualProps) +} diff --git a/planner/filter/normalize.go b/planner/filter/normalize.go index 5f7d275418..181b1f8485 100644 --- a/planner/filter/normalize.go +++ b/planner/filter/normalize.go @@ -16,134 +16,205 @@ import ( ) // normalize normalizes the provided filter conditions. +// // The following cases are subject of normalization: // - _and or _or with one element is removed flattened // - double _not is removed // - any number of consecutive _ands with any number of elements is flattened +// // As the result object is a map with unique keys (a.k.a.
properties), // while performing flattening of compound operators if the same property // is present in the result map, both conditions will be moved into an _and func normalize(conditions map[connor.FilterKey]any) map[connor.FilterKey]any { - return normalizeConditions(conditions, false).(map[connor.FilterKey]any) + return normalizeCondition(nil, conditions).(map[connor.FilterKey]any) } -func conditionsArrToMap(conditions []any) map[connor.FilterKey]any { +// normalizeCondition returns a normalized version of the given condition. +func normalizeCondition(parentKey connor.FilterKey, condition any) (result any) { + switch t := condition.(type) { + case map[connor.FilterKey]any: + result = normalizeConditions(parentKey, t) + + case []any: + conditions := make([]any, len(t)) + for i, c := range t { + conditions[i] = normalizeCondition(parentKey, c) + } + result = conditions + + default: + result = t + } + + return normalizeProperty(parentKey, result) +} + +// normalizeConditions returns a normalized version of the given conditions. +func normalizeConditions(parentKey connor.FilterKey, conditions map[connor.FilterKey]any) map[connor.FilterKey]any { result := make(map[connor.FilterKey]any) - for _, clause := range conditions { - if clauseMap, ok := clause.(map[connor.FilterKey]any); ok { - for k, v := range clauseMap { - result[k] = v + for key, val := range conditions { + result[key] = normalizeCondition(key, val) + + // check if the condition is an operator that can be normalized + op, ok := key.(*mapper.Operator) + if !ok { + continue + } + // check if we have any conditions that can be merged + merge := normalizeOperator(parentKey, op, result[key]) + if len(merge) == 0 { + continue + } + delete(result, key) + + // merge properties directly into result + for _, c := range merge { + for key, val := range c.(map[connor.FilterKey]any) { + result[key] = val } } + + // if the merged filter was an _or operator + // there may be child filters that can be merged + if op.Operation == request.FilterOpOr { + result = normalizeConditions(parentKey, result) + } } return result } -func addNormalizedCondition(key connor.FilterKey, val any, m map[connor.FilterKey]any) { - if _, isProp := key.(*mapper.PropertyIndex); isProp { - var andOp *mapper.Operator - var andContent []any - for existingKey := range m { - if op, isOp := existingKey.(*mapper.Operator); isOp && op.Operation == request.FilterOpAnd { - andOp = op - andContent = m[existingKey].([]any) - break - } - } - for existingKey := range m { - if existingKey.Equal(key) { - existingVal := m[existingKey] - delete(m, existingKey) - if andOp == nil { - andOp = &mapper.Operator{Operation: request.FilterOpAnd} - } - m[andOp] = append( - andContent, - map[connor.FilterKey]any{existingKey: existingVal}, - map[connor.FilterKey]any{key: val}, - ) - return - } +// normalizeOperator returns a normalized array of conditions. +func normalizeOperator(parentKey connor.FilterKey, op *mapper.Operator, condition any) []any { + switch op.Operation { + case request.FilterOpNot: + return normalizeOperatorNot(condition) + + case request.FilterOpOr: + return normalizeOperatorOr(condition) + + case request.FilterOpAnd: + return normalizeOperatorAnd(parentKey, condition) + + default: + return nil + } +} + +// normalizeOperatorAnd returns an array of conditions with all _and operators merged. +// +// If the parent operator is _not or _or, the subconditions will not be merged. 
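+// +// For example (illustrative): in {_and: [{_and: [a, b]}, c]} the inner _and is merged into its parent, while in {_or: [{_and: [a, b]}, c]} it is kept as-is, since merging into an _or would change the filter's meaning.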
+func normalizeOperatorAnd(parentKey connor.FilterKey, condition any) []any { + result := condition.([]any) + // always merge if only 1 property + if len(result) == 1 { + return result + } + // always merge if parent is not an operator + parentOp, ok := parentKey.(*mapper.Operator) + if !ok { + return result + } + // don't merge if parent is a _not or _or operator + if parentOp.Operation == request.FilterOpNot || parentOp.Operation == request.FilterOpOr { + return nil + } + return result +} + +// normalizeOperatorOr returns an array of conditions with all single _or operators merged. +func normalizeOperatorOr(condition any) []any { + result := condition.([]any) + // don't merge if more than 1 property + if len(result) > 1 { + return nil + } + return result +} + +// normalizeOperatorNot returns an array of conditions with all double _not operators merged. +func normalizeOperatorNot(condition any) (result []any) { + subConditions := condition.(map[connor.FilterKey]any) + // don't merge if more than 1 property + if len(subConditions) > 1 { + return nil + } + // find double _not occurrences + for subKey, subCondition := range subConditions { + op, ok := subKey.(*mapper.Operator) + if ok && op.Operation == request.FilterOpNot { + result = append(result, subCondition) } - for _, andElement := range andContent { - elementMap := andElement.(map[connor.FilterKey]any) - for andElementKey := range elementMap { - if andElementKey.Equal(key) { - m[andOp] = append(andContent, map[connor.FilterKey]any{key: val}) - return - } + } + return result +} + +// normalizeProperty flattens and groups property filters where possible. +// +// Filters targeting the same property will be grouped into a single _and. +func normalizeProperty(parentKey connor.FilterKey, condition any) any { + switch t := condition.(type) { + case map[connor.FilterKey]any: + results := make(map[connor.FilterKey]any) + for _, c := range normalizeProperties(parentKey, []any{t}) { + for key, val := range c.(map[connor.FilterKey]any) { + results[key] = val } + } + return results + + case []any: + return normalizeProperties(parentKey, t) + + default: + return t } - m[key] = val } -func normalizeConditions(conditions any, skipRoot bool) any { - result := make(map[connor.FilterKey]any) - switch typedConditions := conditions.(type) { - case map[connor.FilterKey]any: - for rootKey, rootVal := range typedConditions { - rootOpKey, isRootOp := rootKey.(*mapper.Operator) - if isRootOp { - if rootOpKey.Operation == request.FilterOpAnd || rootOpKey.Operation == request.FilterOpOr { - rootValArr := rootVal.([]any) - if len(rootValArr) == 1 || rootOpKey.Operation == request.FilterOpAnd && !skipRoot { - flat := normalizeConditions(conditionsArrToMap(rootValArr), false) - flatMap := flat.(map[connor.FilterKey]any) - for k, v := range flatMap { - addNormalizedCondition(k, v, result) - } - } else { - resultArr := []any{} - for i := range rootValArr { - norm := normalizeConditions(rootValArr[i], !skipRoot) - normMap, ok := norm.(map[connor.FilterKey]any) - if ok { - for k, v := range normMap { - resultArr = append(resultArr, map[connor.FilterKey]any{k: v}) - } - } else { - resultArr = append(resultArr, norm) - } - } - addNormalizedCondition(rootKey, resultArr, result) - } - } else if rootOpKey.Operation == request.FilterOpNot { - notMap := rootVal.(map[connor.FilterKey]any) - if len(notMap) == 1 { - var k connor.FilterKey - for k = range notMap { - break - } - norm := normalizeConditions(notMap, true).(map[connor.FilterKey]any) - delete(notMap, k) - var v any -
for k, v = range norm { - break - } - if opKey, ok := k.(*mapper.Operator); ok && opKey.Operation == request.FilterOpNot { - notNotMap := normalizeConditions(v, false).(map[connor.FilterKey]any) - for notNotKey, notNotVal := range notNotMap { - addNormalizedCondition(notNotKey, notNotVal, result) - } - } else { - notMap[k] = v - addNormalizedCondition(rootOpKey, notMap, result) - } - } else { - addNormalizedCondition(rootKey, rootVal, result) - } - } else { - addNormalizedCondition(rootKey, rootVal, result) - } +// normalizeProperties flattens and groups property filters where possible. +// +// Filters targeting the same property will be grouped into a single _and. +func normalizeProperties(parentKey connor.FilterKey, conditions []any) []any { + var merge []any + var result []any + + // can only merge _and groups if parent is not an _or operator + parentOp, isParentOp := parentKey.(*mapper.Operator) + canMergeAnd := !isParentOp || parentOp.Operation != request.FilterOpOr + + // accumulate properties that can be merged into a single _and + // if canMergeAnd is true, all _and groups will be merged + props := make(map[int][]any) + for _, c := range conditions { + for key, val := range c.(map[connor.FilterKey]any) { + op, ok := key.(*mapper.Operator) + if canMergeAnd && ok && op.Operation == request.FilterOpAnd { + merge = append(merge, val.([]any)...) + } else if prop, ok := key.(*mapper.PropertyIndex); ok { + props[prop.Index] = append(props[prop.Index], map[connor.FilterKey]any{key: val}) + } else { - addNormalizedCondition(rootKey, normalizeConditions(rootVal, false), result) + result = append(result, map[connor.FilterKey]any{key: val}) + } + } + } + + // merge filters with duplicate keys into a single _and + for _, val := range props { + if len(val) == 1 { + // only 1 property so no merge required + result = append(result, val...) + } else { + // multiple properties require merge with _and + merge = append(merge, val...) + } + } + + // nothing to merge + if len(merge) == 0 { return result - case []any: - return conditionsArrToMap(typedConditions) - default: - return conditions } + + // merge into a single _and operator + key := &mapper.Operator{Operation: request.FilterOpAnd} + result = append(result, map[connor.FilterKey]any{key: merge}) + return result } diff --git a/planner/filter/remove_field.go b/planner/filter/remove_field.go index 5c80ffc96c..5e9f2f532e 100644 --- a/planner/filter/remove_field.go +++ b/planner/filter/remove_field.go @@ -14,13 +14,17 @@ import ( ) // RemoveField removes the given field from the provided filter. -func RemoveField(filter *mapper.Filter, field mapper.Field) { - if filter == nil { +// Multiple fields can be passed to remove related objects with a certain field.
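+// +// An illustrative sketch (fixtures as in util_test.go): removing the path {published, rating} from {_and: [{published: {rating: {_gt: 4.0}}}, {age: {_gt: 30}}]} leaves {_and: [{age: {_gt: 30}}]}; compound operators that become empty are deleted as well.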
+func RemoveField(filter *mapper.Filter, fields ...mapper.Field) { + if filter == nil || len(fields) == 0 { return } - conditionKey := &mapper.PropertyIndex{ - Index: field.Index, + var conditionKeys []*mapper.PropertyIndex + for _, field := range fields { + conditionKeys = append(conditionKeys, &mapper.PropertyIndex{ + Index: field.Index, + }) } - traverseFilterByProperty(conditionKey, filter.Conditions, true) + traverseFilterByProperty(conditionKeys, filter.Conditions, true) } diff --git a/planner/filter/remove_field_test.go b/planner/filter/remove_field_test.go index 2b6e8cdd3a..8a34999e60 100644 --- a/planner/filter/remove_field_test.go +++ b/planner/filter/remove_field_test.go @@ -13,13 +13,14 @@ import ( "testing" "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/connor" "github.com/sourcenetwork/defradb/planner/mapper" ) func TestRemoveFieldFromFilter(t *testing.T) { tests := []struct { name string - inputField mapper.Field + inputField []mapper.Field inputFilter map[string]any expectedFilter map[string]any }{ @@ -29,7 +30,7 @@ func TestRemoveFieldFromFilter(t *testing.T) { "name": m("_eq", "John"), "age": m("_gt", 55), }, - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: m("name", m("_eq", "John")), }, { @@ -38,7 +39,7 @@ func TestRemoveFieldFromFilter(t *testing.T) { m("name", m("_eq", "John")), m("age", m("_gt", 55)), ), - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: r("_and", m("name", m("_eq", "John")), ), @@ -57,7 +58,7 @@ func TestRemoveFieldFromFilter(t *testing.T) { m("age", m("_lt", 55)), ), ), - inputField: mapper.Field{Index: 1}, // age + inputField: []mapper.Field{{Index: authorAgeInd}}, expectedFilter: r("_and", r("_or", r("_and", @@ -69,13 +70,69 @@ func TestRemoveFieldFromFilter(t *testing.T) { ), ), }, + { + name: "remove _or/_and if only element", + inputFilter: r("_and", + r("_or", + r("_and", + m("age", m("_gt", 30)), + ), + ), + r("_or", + m("age", m("_lt", 55)), + ), + m("name", m("_eq", "Islam")), + ), + inputField: []mapper.Field{{Index: authorAgeInd}}, + expectedFilter: r("_and", + m("name", m("_eq", "Islam")), + ), + }, + { + name: "field of related object", + inputFilter: r("_and", + r("_or", + r("_and", + m("published", m("rating", m("_gt", 4.0))), + m("age", m("_gt", 30)), + ), + ), + m("published", m("genre", m("_eq", "Comedy"))), + m("name", m("_eq", "John")), + ), + inputField: []mapper.Field{{Index: authorPublishedInd}, {Index: bookRatingInd}}, + expectedFilter: r("_and", + r("_or", + r("_and", + m("age", m("_gt", 30)), + ), + ), + m("published", m("genre", m("_eq", "Comedy"))), + m("name", m("_eq", "John")), + ), + }, + { + name: "field of related object (deeper)", + inputFilter: r("_and", + m("age", m("_gt", 30)), + m("published", m("stores", m("address", m("_eq", "123 Main St")))), + m("published", m("stores", m("name", m("_eq", "Barnes & Noble")))), + m("published", m("genre", m("_eq", "Comedy"))), + ), + inputField: []mapper.Field{{Index: authorPublishedInd}, {Index: bookStoresInd}, {Index: storeAddressInd}}, + expectedFilter: r("_and", + m("age", m("_gt", 30)), + m("published", m("stores", m("name", m("_eq", "Barnes & Noble")))), + m("published", m("genre", m("_eq", "Comedy"))), + ), + }, } mapping := getDocMapping() for _, test := range tests { t.Run(test.name, func(t *testing.T) { inputFilter := mapper.ToFilter(request.Filter{Conditions: test.inputFilter}, mapping) - 
RemoveField(inputFilter, test.inputField) + RemoveField(inputFilter, test.inputField...) expectedFilter := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter}, mapping) AssertEqualFilterMap(t, expectedFilter.Conditions, inputFilter.Conditions) }) @@ -85,3 +142,17 @@ func TestRemoveFieldFromFilter(t *testing.T) { func TestRemoveFieldFromNullFilter(t *testing.T) { RemoveField(nil, mapper.Field{Index: 1}) } + +func TestRemoveFieldWithNoFieldGiven(t *testing.T) { + getFilter := func() *mapper.Filter { + f := mapper.NewFilter() + f.Conditions = map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: 0}: &mapper.Operator{Operation: "_eq"}, + } + return f + } + f := getFilter() + RemoveField(f) + + AssertEqualFilter(t, getFilter(), f) +} diff --git a/planner/filter/split.go b/planner/filter/split.go index bba822145a..1ef153746b 100644 --- a/planner/filter/split.go +++ b/planner/filter/split.go @@ -27,8 +27,12 @@ func SplitByField(filter *mapper.Filter, field mapper.Field) (*mapper.Filter, *m return nil, nil } - splitF := copyField(filter, field) + splitF := CopyField(filter, field) RemoveField(filter, field) + if len(filter.Conditions) == 0 { + filter = nil + } + return filter, splitF } diff --git a/planner/filter/split_test.go b/planner/filter/split_test.go index 1bcbecffb7..86fbb0b44a 100644 --- a/planner/filter/split_test.go +++ b/planner/filter/split_test.go @@ -32,10 +32,28 @@ func TestSplitFilter(t *testing.T) { "name": m("_eq", "John"), "age": m("_gt", 55), }, - inputField: mapper.Field{Index: 1}, // age + inputField: mapper.Field{Index: authorAgeInd}, expectedFilter1: m("name", m("_eq", "John")), expectedFilter2: m("age", m("_gt", 55)), }, + { + name: "the only field", + inputFilter: map[string]any{ + "age": m("_gt", 55), + }, + inputField: mapper.Field{Index: authorAgeInd}, + expectedFilter1: nil, + expectedFilter2: m("age", m("_gt", 55)), + }, + { + name: "no field to delete", + inputFilter: map[string]any{ + "name": m("_eq", "John"), + }, + inputField: mapper.Field{Index: authorAgeInd}, + expectedFilter1: m("name", m("_eq", "John")), + expectedFilter2: nil, + }, } mapping := getDocMapping() @@ -45,14 +63,18 @@ func TestSplitFilter(t *testing.T) { actualFilter1, actualFilter2 := SplitByField(inputFilter, test.inputField) expectedFilter1 := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter1}, mapping) expectedFilter2 := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter2}, mapping) - AssertEqualFilterMap(t, expectedFilter1.Conditions, actualFilter1.Conditions) - AssertEqualFilterMap(t, expectedFilter2.Conditions, actualFilter2.Conditions) + if expectedFilter1 != nil || actualFilter1 != nil { + AssertEqualFilterMap(t, expectedFilter1.Conditions, actualFilter1.Conditions) + } + if expectedFilter2 != nil || actualFilter2 != nil { + AssertEqualFilterMap(t, expectedFilter2.Conditions, actualFilter2.Conditions) + } }) } } func TestSplitNullFilter(t *testing.T) { - actualFilter1, actualFilter2 := SplitByField(nil, mapper.Field{Index: 1}) + actualFilter1, actualFilter2 := SplitByField(nil, mapper.Field{Index: authorAgeInd}) assert.Nil(t, actualFilter1) assert.Nil(t, actualFilter2) } diff --git a/planner/filter/unwrap_relation.go b/planner/filter/unwrap_relation.go new file mode 100644 index 0000000000..aa1be2e25d --- /dev/null +++ b/planner/filter/unwrap_relation.go @@ -0,0 +1,86 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. 
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. +package filter + +import ( + "github.com/sourcenetwork/defradb/connor" + "github.com/sourcenetwork/defradb/planner/mapper" +) + +// UnwrapRelation runs through the filter and returns a new filter with only the +// fields of a given relation object. +// Example: +// +// { +// "published": { +// "rating": { +// "_gt": 4.0 +// } +// } +// } +// +// with the given "published" field will return +// +// { +// "rating": { +// "_gt": 4.0 +// } +// } +func UnwrapRelation(filter *mapper.Filter, field mapper.Field) *mapper.Filter { + if filter == nil { + return nil + } + conditionKey := &mapper.PropertyIndex{ + Index: field.Index, + } + + resultFilter := &mapper.Filter{} + conditionMap := traverseFilterAndExtract(conditionKey, filter.Conditions, false) + if len(conditionMap) > 0 { + resultFilter.Conditions = conditionMap + return resultFilter + } + return nil +} + +func traverseFilterAndExtract( + key *mapper.PropertyIndex, + conditions map[connor.FilterKey]any, + shouldDelete bool, +) map[connor.FilterKey]any { + result := make(map[connor.FilterKey]any) + for targetKey, clause := range conditions { + if targetKey.Equal(key) { + clauseMap := clause.(map[connor.FilterKey]any) + for k, v := range clauseMap { + result[k] = v + } + } else if opKey, isOpKey := targetKey.(*mapper.Operator); isOpKey { + clauseArr, isArr := clause.([]any) + if isArr { + resultArr := make([]any, 0) + for _, elementClause := range clauseArr { + elementMap, ok := elementClause.(map[connor.FilterKey]any) + if !ok { + continue + } + compoundCond := traverseFilterAndExtract(key, elementMap, shouldDelete) + if len(compoundCond) > 0 { + resultArr = append(resultArr, compoundCond) + } + } + if len(resultArr) > 0 { + result[opKey] = resultArr + } + } + } + } + return result +} diff --git a/planner/filter/unwrap_relation_test.go b/planner/filter/unwrap_relation_test.go new file mode 100644 index 0000000000..a7446f9d30 --- /dev/null +++ b/planner/filter/unwrap_relation_test.go @@ -0,0 +1,99 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt.
+package filter + +import ( + "testing" + + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/connor" + "github.com/sourcenetwork/defradb/planner/mapper" + + "github.com/stretchr/testify/assert" +) + +func TestUnwrapRelation(t *testing.T) { + tests := []struct { + name string + inputFilter map[string]any + expectedFilter map[string]any + }{ + { + name: "simple", + inputFilter: m("published", m("rating", m("_gt", 4.0))), + expectedFilter: m("rating", m("_gt", 4.0)), + }, + { + name: "no relation object", + inputFilter: map[string]any{ + "name": m("_eq", "John"), + "age": m("_gt", 55), + }, + expectedFilter: nil, + }, + { + name: "within _or and _and", + inputFilter: r("_and", + r("_or", + r("_and", + m("name", m("_eq", "John")), + m("age", m("_gt", 30)), + m("published", m("stores", m("address", m("_eq", "123 Main St")))), + m("published", m("rating", m("_gt", 4.0))), + ), + ), + r("_or", + m("published", m("stores", m("address", m("_eq", "2 Ave")))), + ), + m("published", m("genre", m("_eq", "Comedy"))), + ), + expectedFilter: r("_and", + r("_or", + r("_and", + m("stores", m("address", m("_eq", "123 Main St"))), + m("rating", m("_gt", 4.0)), + ), + ), + r("_or", + m("stores", m("address", m("_eq", "2 Ave"))), + ), + m("genre", m("_eq", "Comedy")), + ), + }, + } + + mapping := getDocMapping() + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + inputFilter := mapper.ToFilter(request.Filter{Conditions: test.inputFilter}, mapping) + actualFilter := UnwrapRelation(inputFilter, mapper.Field{Index: authorPublishedInd}) + childMapping := mapping.ChildMappings[authorPublishedInd] + expectedFilter := mapper.ToFilter(request.Filter{Conditions: test.expectedFilter}, childMapping) + if expectedFilter == nil && actualFilter == nil { + return + } + AssertEqualFilterMap(t, expectedFilter.Conditions, actualFilter.Conditions) + }) + } +} + +func TestUnwrapRelationOfNullFilter(t *testing.T) { + actualFilter := UnwrapRelation(nil, mapper.Field{Index: 1}) + assert.Nil(t, actualFilter) +} + +func TestUnwrapRelationWithNoRelationInFilter(t *testing.T) { + filter := mapper.NewFilter() + filter.Conditions = map[connor.FilterKey]any{ + &mapper.PropertyIndex{Index: 0}: &mapper.Operator{Operation: "_eq"}, + } + actualFilter := UnwrapRelation(filter, mapper.Field{Index: authorPublishedInd}) + assert.Nil(t, actualFilter) +} diff --git a/planner/filter/util_test.go b/planner/filter/util_test.go index e8860081c8..19b367172c 100644 --- a/planner/filter/util_test.go +++ b/planner/filter/util_test.go @@ -130,11 +130,55 @@ func r(op string, vals ...any) map[string]any { return m(op, vals) } +const ( + authorNameInd = iota + authorAgeInd + authorPublishedInd + authorVerifiedInd + authorNumFields +) + +const ( + bookRatingInd = iota + bookGenreInd + bookNameInd + bookStoresInd + bookNumFields +) + +const ( + storeAddressInd = iota + storeNameInd + storeNumFields +) + func getDocMapping() *core.DocumentMapping { + bookChildMappings := make([]*core.DocumentMapping, bookNumFields) + bookChildMappings[bookStoresInd] = &core.DocumentMapping{ + IndexesByName: map[string][]int{ + "address": {storeAddressInd}, + "name": {storeNameInd}, + }, + } + + authorChildMappings := make([]*core.DocumentMapping, authorNumFields) + authorChildMappings[authorPublishedInd] = &core.DocumentMapping{ + IndexesByName: map[string][]int{ + "rating": {bookRatingInd}, + "genre": {bookGenreInd}, + "name": {bookNameInd}, + "stores": {bookStoresInd}, + }, + ChildMappings: bookChildMappings, + } + + return &core.DocumentMapping{ - IndexesByName:
map[string][]int{"name": {0}, "age": {1}, "published": {2}, "verified": {3}}, - ChildMappings: []*core.DocumentMapping{nil, nil, { - IndexesByName: map[string][]int{"rating": {11}, "genre": {12}}, - }}, + IndexesByName: map[string][]int{ + "name": {authorNameInd}, + "age": {authorAgeInd}, + "published": {authorPublishedInd}, + "verified": {authorVerifiedInd}, + }, + ChildMappings: authorChildMappings, } } diff --git a/planner/mapper/descriptions.go b/planner/mapper/descriptions.go deleted file mode 100644 index e7edd865cd..0000000000 --- a/planner/mapper/descriptions.go +++ /dev/null @@ -1,66 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package mapper - -import ( - "context" - "encoding/json" - - "github.com/sourcenetwork/defradb/client" - "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/errors" -) - -// DescriptionsRepo is a cache of previously requested collection descriptions -// that can be used to reduce multiple reads of the same collection description. -type DescriptionsRepo struct { - ctx context.Context - txn datastore.Txn - - collectionDescriptionsByName map[string]client.CollectionDescription -} - -// NewDescriptionsRepo instantiates a new DescriptionsRepo with the given context and transaction. -func NewDescriptionsRepo(ctx context.Context, txn datastore.Txn) *DescriptionsRepo { - return &DescriptionsRepo{ - ctx: ctx, - txn: txn, - collectionDescriptionsByName: map[string]client.CollectionDescription{}, - } -} - -// getCollectionDesc returns the description of the collection with the given name. -// -// Will return nil and an error if a description of the given name is not found. Will first look -// in the repo's cache for the description before doing a query operation on the datastore. -func (r *DescriptionsRepo) getCollectionDesc(name string) (client.CollectionDescription, error) { - collectionKey := core.NewCollectionKey(name) - var desc client.CollectionDescription - schemaVersionIdBytes, err := r.txn.Systemstore().Get(r.ctx, collectionKey.ToDS()) - if err != nil { - return desc, errors.Wrap("failed to get collection description", err) - } - - schemaVersionId := string(schemaVersionIdBytes) - schemaVersionKey := core.NewCollectionSchemaVersionKey(schemaVersionId) - buf, err := r.txn.Systemstore().Get(r.ctx, schemaVersionKey.ToDS()) - if err != nil { - return desc, err - } - - err = json.Unmarshal(buf, &desc) - if err != nil { - return desc, err - } - - return desc, nil -} diff --git a/planner/mapper/mapper.go b/planner/mapper/mapper.go index b6f80a55a2..418c0c5c57 100644 --- a/planner/mapper/mapper.go +++ b/planner/mapper/mapper.go @@ -21,7 +21,6 @@ import ( "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/connor" "github.com/sourcenetwork/defradb/core" - "github.com/sourcenetwork/defradb/datastore" ) var ( @@ -32,10 +31,9 @@ var ( // // In the process of doing so it will construct the document map required to access the data // yielded by the [Select]. 
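+// +// Callers now supply a client.Store rather than a transaction (see the updated call sites in planner/planner.go); an illustrative call: sel, err := mapper.ToSelect(ctx, db, selectRequest).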
-func ToSelect(ctx context.Context, txn datastore.Txn, selectRequest *request.Select) (*Select, error) { - descriptionsRepo := NewDescriptionsRepo(ctx, txn) +func ToSelect(ctx context.Context, store client.Store, selectRequest *request.Select) (*Select, error) { // the top-level select will always have index=0, and no parent collection name - return toSelect(descriptionsRepo, 0, selectRequest, "") + return toSelect(ctx, store, 0, selectRequest, "") } // toSelect converts the given [parser.Select] into a [Select]. @@ -43,29 +41,30 @@ func ToSelect(ctx context.Context, txn datastore.Txn, selectRequest *request.Sel // In the process of doing so it will construct the document map required to access the data // yielded by the [Select]. func toSelect( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, thisIndex int, selectRequest *request.Select, parentCollectionName string, ) (*Select, error) { - collectionName, err := getCollectionName(descriptionsRepo, selectRequest, parentCollectionName) + collectionName, err := getCollectionName(ctx, store, selectRequest, parentCollectionName) if err != nil { return nil, err } - mapping, desc, err := getTopLevelInfo(descriptionsRepo, selectRequest, collectionName) + mapping, collection, err := getTopLevelInfo(ctx, store, selectRequest, collectionName) if err != nil { return nil, err } - fields, aggregates, err := getRequestables(selectRequest, mapping, desc, descriptionsRepo) + fields, aggregates, err := getRequestables(ctx, selectRequest, mapping, collection, store) if err != nil { return nil, err } // Needs to be done before resolving aggregates, else filter conversion may fail there filterDependencies, err := resolveFilterDependencies( - descriptionsRepo, collectionName, selectRequest.Filter, mapping, fields) + ctx, store, collectionName, selectRequest.Filter, mapping, fields) if err != nil { return nil, err } @@ -73,28 +72,31 @@ func toSelect( // Resolve order dependencies that may have been missed due to not being rendered. err = resolveOrderDependencies( - descriptionsRepo, collectionName, selectRequest.OrderBy, mapping, &fields) + ctx, store, collectionName, selectRequest.OrderBy, mapping, &fields) if err != nil { return nil, err } aggregates = appendUnderlyingAggregates(aggregates, mapping) fields, err = resolveAggregates( + ctx, selectRequest, aggregates, fields, mapping, - desc, - descriptionsRepo, + collection, + store, ) if err != nil { return nil, err } - fields, err = resolveSecondaryRelationIDs(descriptionsRepo, desc, mapping, fields) - if err != nil { - return nil, err + if collection != nil { + fields, err = resolveSecondaryRelationIDs(ctx, store, collection, mapping, fields) + if err != nil { + return nil, err + } } // Resolve groupBy mappings i.e. alias remapping and handle missed inner group. @@ -102,7 +104,10 @@ func toSelect( groupByFields := selectRequest.GroupBy.Value().Fields // Remap all alias field names to use their internal field name mappings. for index, groupByField := range groupByFields { - fieldDesc, ok := desc.Schema.GetField(groupByField) + if collection == nil { + continue + } + fieldDesc, ok := collection.Schema().GetField(groupByField) if ok && fieldDesc.IsObject() && !fieldDesc.IsObjectArray() { groupByFields[index] = groupByField + request.RelatedObjectID } else if ok && fieldDesc.IsObjectArray() { @@ -135,7 +140,8 @@ func toSelect( // resolveOrderDependencies will map fields that were missed due to them not being requested. 
// Modifies the consumed existingFields and mapping accordingly. func resolveOrderDependencies( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, descName string, source immutable.Option[request.OrderBy], mapping *core.DocumentMapping, @@ -160,7 +166,7 @@ outer: joinField := fields[0] // ensure the child select is resolved for this order join - innerSelect, err := resolveChildOrder(descriptionsRepo, descName, joinField, mapping, currentExistingFields) + innerSelect, err := resolveChildOrder(ctx, store, descName, joinField, mapping, currentExistingFields) if err != nil { return err } @@ -178,7 +184,7 @@ outer: joinField := fields[0] // ensure the child select is resolved for this order join - innerSelect, err := resolveChildOrder(descriptionsRepo, descName, joinField, mapping, existingFields) + innerSelect, err := resolveChildOrder(ctx, store, descName, joinField, mapping, existingFields) if err != nil { return err } @@ -203,7 +209,8 @@ outer: // given a type join field, ensure its mapping exists // and add a coorsponding select field(s) func resolveChildOrder( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, descName string, orderChildField string, mapping *core.DocumentMapping, @@ -221,7 +228,7 @@ func resolveChildOrder( Name: orderChildField, }, } - innerSelect, err := toSelect(descriptionsRepo, index, &dummyJoinFieldSelect, descName) + innerSelect, err := toSelect(ctx, store, index, &dummyJoinFieldSelect, descName) if err != nil { return nil, err } @@ -250,12 +257,13 @@ func resolveChildOrder( // append the new target field as well as the aggregate. The mapping will also be // updated with any new fields/aggregates. func resolveAggregates( + ctx context.Context, selectRequest *request.Select, aggregates []*aggregateRequest, inputFields []Requestable, mapping *core.DocumentMapping, - desc *client.CollectionDescription, - descriptionsRepo *DescriptionsRepo, + collection client.Collection, + store client.Store, ) ([]Requestable, error) { fields := inputFields dependenciesByParentId := map[int][]int{} @@ -274,7 +282,12 @@ func resolveAggregates( var hasHost bool var convertedFilter *Filter if childIsMapped { - fieldDesc, isField := desc.Schema.GetField(target.hostExternalName) + var fieldDesc client.FieldDescription + var isField bool + if collection != nil { + fieldDesc, isField = collection.Schema().GetField(target.hostExternalName) + } + if isField && !fieldDesc.IsObject() { var order *OrderBy if target.order.HasValue() && len(target.order.Value().Conditions) > 0 { @@ -326,24 +339,29 @@ func resolveAggregates( }, } - childCollectionName, err := getCollectionName(descriptionsRepo, hostSelectRequest, desc.Name) + var collectionName string + if collection != nil { + collectionName = collection.Name() + } + + childCollectionName, err := getCollectionName(ctx, store, hostSelectRequest, collectionName) if err != nil { return nil, err } mapAggregateNestedTargets(target, hostSelectRequest, selectRequest.Root) - childMapping, childDesc, err := getTopLevelInfo(descriptionsRepo, hostSelectRequest, childCollectionName) + childMapping, childDesc, err := getTopLevelInfo(ctx, store, hostSelectRequest, childCollectionName) if err != nil { return nil, err } - childFields, _, err := getRequestables(hostSelectRequest, childMapping, childDesc, descriptionsRepo) + childFields, _, err := getRequestables(ctx, hostSelectRequest, childMapping, childDesc, store) if err != nil { return nil, err } err = resolveOrderDependencies( - descriptionsRepo, 
childCollectionName, target.order, childMapping, &childFields) + ctx, store, childCollectionName, target.order, childMapping, &childFields) if err != nil { return nil, err } @@ -587,10 +605,11 @@ func appendIfNotExists( // and aggregateRequests from the given selectRequest.Fields slice. It also mutates the // consumed mapping data. func getRequestables( + ctx context.Context, selectRequest *request.Select, mapping *core.DocumentMapping, - desc *client.CollectionDescription, - descriptionsRepo *DescriptionsRepo, + collection client.Collection, + store client.Store, ) (fields []Requestable, aggregates []*aggregateRequest, err error) { for _, field := range selectRequest.Fields { switch f := field.(type) { @@ -611,8 +630,12 @@ func getRequestables( }) case *request.Select: index := mapping.GetNextIndex() + var parentCollectionName string + if collection != nil { + parentCollectionName = collection.Name() + } - innerSelect, err := toSelect(descriptionsRepo, index, f, desc.Name) + innerSelect, err := toSelect(ctx, store, index, f, parentCollectionName) if err != nil { return nil, nil, err } @@ -676,7 +699,8 @@ func getAggregateRequests(index int, aggregate *request.Aggregate) (aggregateReq // getCollectionName returns the name of the selectRequest collection. This may be empty // if this is a commit request. func getCollectionName( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, selectRequest *request.Select, parentCollectionName string, ) (string, error) { @@ -692,12 +716,12 @@ func getCollectionName( } if parentCollectionName != "" { - parentDescription, err := descriptionsRepo.getCollectionDesc(parentCollectionName) + parentCollection, err := store.GetCollectionByName(ctx, parentCollectionName) if err != nil { return "", err } - hostFieldDesc, parentHasField := parentDescription.Schema.GetField(selectRequest.Name) + hostFieldDesc, parentHasField := parentCollection.Schema().GetField(selectRequest.Name) if parentHasField && hostFieldDesc.RelationType != 0 { // If this field exists on the parent, and it is a child object // then this collection name is the collection name of the child. @@ -710,28 +734,29 @@ func getCollectionName( // getTopLevelInfo returns the collection description and maps the fields directly on the object. func getTopLevelInfo( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, selectRequest *request.Select, collectionName string, -) (*core.DocumentMapping, *client.CollectionDescription, error) { +) (*core.DocumentMapping, client.Collection, error) { mapping := core.NewDocumentMapping() if _, isAggregate := request.Aggregates[selectRequest.Name]; isAggregate { // If this is a (top-level) aggregate, then it will have no collection // description, and no top-level fields, so we return an empty mapping only - return mapping, &client.CollectionDescription{}, nil + return mapping, nil, nil } if selectRequest.Root == request.ObjectSelection { mapping.Add(core.DocKeyFieldIndex, request.KeyFieldName) - desc, err := descriptionsRepo.getCollectionDesc(collectionName) + collection, err := store.GetCollectionByName(ctx, collectionName) if err != nil { return nil, nil, err } // Map all fields from schema into the map as they are fetched automatically - for _, f := range desc.Schema.Fields { + for _, f := range collection.Schema().Fields { if f.IsObject() { // Objects are skipped, as they are not fetched by default and // have to be requested via selects. 
@@ -746,7 +771,7 @@ func getTopLevelInfo( mapping.Add(mapping.GetNextIndex(), request.DeletedFieldName) - return mapping, &desc, nil + return mapping, collection, nil } if selectRequest.Name == request.LinksFieldName { @@ -767,11 +792,12 @@ func getTopLevelInfo( mapping.SetTypeName(request.CommitTypeName) } - return mapping, &client.CollectionDescription{}, nil + return mapping, nil, nil } func resolveFilterDependencies( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, parentCollectionName string, source immutable.Option[request.Filter], mapping *core.DocumentMapping, @@ -782,7 +808,8 @@ func resolveFilterDependencies( } return resolveInnerFilterDependencies( - descriptionsRepo, + ctx, + store, parentCollectionName, source.Value().Conditions, mapping, @@ -792,7 +819,8 @@ func resolveFilterDependencies( } func resolveInnerFilterDependencies( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, parentCollectionName string, source map[string]any, mapping *core.DocumentMapping, @@ -806,7 +834,8 @@ func resolveInnerFilterDependencies( compoundFilter := source[key].([]any) for _, innerFilter := range compoundFilter { innerFields, err := resolveInnerFilterDependencies( - descriptionsRepo, + ctx, + store, parentCollectionName, innerFilter.(map[string]any), mapping, @@ -824,7 +853,8 @@ func resolveInnerFilterDependencies( } else if key == request.FilterOpNot { notFilter := source[key].(map[string]any) innerFields, err := resolveInnerFilterDependencies( - descriptionsRepo, + ctx, + store, parentCollectionName, notFilter, mapping, @@ -868,7 +898,7 @@ func resolveInnerFilterDependencies( } } else { var err error - childSelect, err = constructEmptyJoin(descriptionsRepo, parentCollectionName, mapping, key) + childSelect, err = constructEmptyJoin(ctx, store, parentCollectionName, mapping, key) if err != nil { return nil, err } @@ -885,13 +915,14 @@ func resolveInnerFilterDependencies( } dummyParsed := &request.Select{Field: request.Field{Name: key}} - childCollectionName, err := getCollectionName(descriptionsRepo, dummyParsed, parentCollectionName) + childCollectionName, err := getCollectionName(ctx, store, dummyParsed, parentCollectionName) if err != nil { return nil, err } childFields, err := resolveInnerFilterDependencies( - descriptionsRepo, + ctx, + store, childCollectionName, childFilter, childSelect.DocumentMapping, @@ -910,7 +941,8 @@ func resolveInnerFilterDependencies( // constructEmptyJoin constructs a valid empty join with no requested fields. func constructEmptyJoin( - descriptionsRepo *DescriptionsRepo, + ctx context.Context, + store client.Store, parentCollectionName string, parentMapping *core.DocumentMapping, name string, @@ -923,12 +955,12 @@ func constructEmptyJoin( }, } - childCollectionName, err := getCollectionName(descriptionsRepo, dummyParsed, parentCollectionName) + childCollectionName, err := getCollectionName(ctx, store, dummyParsed, parentCollectionName) if err != nil { return nil, err } - childMapping, _, err := getTopLevelInfo(descriptionsRepo, dummyParsed, childCollectionName) + childMapping, _, err := getTopLevelInfo(ctx, store, dummyParsed, childCollectionName) if err != nil { return nil, err } @@ -955,8 +987,9 @@ func constructEmptyJoin( // // They copying itself is handled within [typeJoinOne]. 
func resolveSecondaryRelationIDs( - descriptionsRepo *DescriptionsRepo, - desc *client.CollectionDescription, + ctx context.Context, + store client.Store, + collection client.Collection, mapping *core.DocumentMapping, requestables []Requestable, ) ([]Requestable, error) { @@ -968,7 +1001,7 @@ continue } - fieldDesc, descFound := desc.Schema.GetField(existingField.Name) + fieldDesc, descFound := collection.Schema().GetField(existingField.Name) if !descFound { continue } @@ -977,7 +1010,7 @@ continue } - objectFieldDesc, descFound := desc.Schema.GetField( + objectFieldDesc, descFound := collection.Schema().GetField( strings.TrimSuffix(existingField.Name, request.RelatedObjectID), ) if !descFound { @@ -995,7 +1028,7 @@ continue } - siblingFieldDesc, descFound := desc.Schema.GetField(siblingSelect.Field.Name) + siblingFieldDesc, descFound := collection.Schema().GetField(siblingSelect.Field.Name) if !descFound { continue } @@ -1017,8 +1050,9 @@ // We only require the dockey of the related object, so an empty join is all we need. join, err := constructEmptyJoin( - descriptionsRepo, - desc.Name, + ctx, + store, + collection.Name(), mapping, objectFieldName, ) @@ -1039,10 +1073,10 @@ // yielded by the [Select] embedded in the [CommitSelect]. func ToCommitSelect( ctx context.Context, - txn datastore.Txn, + store client.Store, selectRequest *request.CommitSelect, ) (*CommitSelect, error) { - underlyingSelect, err := ToSelect(ctx, txn, selectRequest.ToSelect()) + underlyingSelect, err := ToSelect(ctx, store, selectRequest.ToSelect()) if err != nil { return nil, err } @@ -1059,8 +1093,8 @@ // // In the process of doing so it will construct the document map required to access the data // yielded by the [Select] embedded in the [Mutation]. -func ToMutation(ctx context.Context, txn datastore.Txn, mutationRequest *request.ObjectMutation) (*Mutation, error) { - underlyingSelect, err := ToSelect(ctx, txn, mutationRequest.ToSelect()) +func ToMutation(ctx context.Context, store client.Store, mutationRequest *request.ObjectMutation) (*Mutation, error) { + underlyingSelect, err := ToSelect(ctx, store, mutationRequest.ToSelect()) if err != nil { return nil, err } diff --git a/planner/mapper/targetable.go b/planner/mapper/targetable.go index bcfdb02ef8..0b571e6830 100644 --- a/planner/mapper/targetable.go +++ b/planner/mapper/targetable.go @@ -91,6 +91,17 @@ func (f *Filter) ToMap(mapping *core.DocumentMapping) map[string]any { return filterObjectToMap(mapping, f.Conditions) } +// HasIndex returns true if the filter has a condition that targets a +// property with the given index.
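+// +// Note that only top-level conditions are checked; properties nested under compound operators such as _and or _or are not traversed.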
+func (f *Filter) HasIndex(index int) bool { + for k := range f.Conditions { + if propIndex, isOk := k.(*PropertyIndex); isOk && propIndex.Index == index { + return true + } + } + return false +} + func filterObjectToMap(mapping *core.DocumentMapping, obj map[connor.FilterKey]any) map[string]any { outmap := make(map[string]any) if obj == nil { diff --git a/planner/planner.go b/planner/planner.go index bcb0653633..b066e1f0e3 100644 --- a/planner/planner.go +++ b/planner/planner.go @@ -15,8 +15,10 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/connor" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/planner/filter" "github.com/sourcenetwork/defradb/planner/mapper" ) @@ -112,7 +114,7 @@ func (p *Planner) newPlan(stmt any) (planNode, error) { return p.newPlan(n.Selections[0]) case *request.Select: - m, err := mapper.ToSelect(p.ctx, p.txn, n) + m, err := mapper.ToSelect(p.ctx, p.db, n) if err != nil { return nil, err } @@ -127,14 +129,14 @@ func (p *Planner) newPlan(stmt any) (planNode, error) { return p.Select(m) case *request.CommitSelect: - m, err := mapper.ToCommitSelect(p.ctx, p.txn, n) + m, err := mapper.ToCommitSelect(p.ctx, p.db, n) if err != nil { return nil, err } return p.CommitSelect(m) case *request.ObjectMutation: - m, err := mapper.ToMutation(p.ctx, p.txn, n) + m, err := mapper.ToMutation(p.ctx, p.db, n) if err != nil { return nil, err } @@ -296,16 +298,83 @@ func (p *Planner) expandMultiNode(multiNode MultiNode, parentPlan *selectTopNode return nil } +// expandTypeIndexJoinPlan does a plan graph expansion and other optimizations on typeIndexJoin. func (p *Planner) expandTypeIndexJoinPlan(plan *typeIndexJoin, parentPlan *selectTopNode) error { switch node := plan.joinPlan.(type) { case *typeJoinOne: - return p.expandPlan(node.subType, parentPlan) + return p.expandTypeJoin(&node.invertibleTypeJoin, parentPlan) case *typeJoinMany: - return p.expandPlan(node.subType, parentPlan) + return p.expandTypeJoin(&node.invertibleTypeJoin, parentPlan) } return client.NewErrUnhandledType("join plan", plan.joinPlan) } +func findFilteredByRelationFields( + conditions map[connor.FilterKey]any, + mapping *core.DocumentMapping, +) map[string]int { + filterProperties := filter.ExtractProperties(conditions) + filteredSubFields := make(map[string]int) + for _, prop := range filterProperties { + if childMapping := mapping.ChildMappings[prop.Index]; childMapping != nil { + if !prop.IsRelation() { + continue + } + for _, subProp := range prop.Fields { + for fieldName, indices := range childMapping.IndexesByName { + if indices[0] == subProp.Index { + filteredSubFields[fieldName] = subProp.Index + } + } + } + } + } + return filteredSubFields +} + +func (p *Planner) tryOptimizeJoinDirection(node *invertibleTypeJoin, parentPlan *selectTopNode) error { + filteredSubFields := findFilteredByRelationFields( + parentPlan.selectNode.filter.Conditions, + node.documentMapping, + ) + slct := node.subType.(*selectTopNode).selectNode + desc := slct.collection.Description() + schema := slct.collection.Schema() + indexedFields := desc.CollectIndexedFields(&schema) + for _, indField := range indexedFields { + if ind, ok := filteredSubFields[indField.Name]; ok { + subInd := node.documentMapping.FirstIndexOfName(node.subTypeName) + relatedField := mapper.Field{Name: node.subTypeName, Index: subInd} + fieldFilter := filter.UnwrapRelation(filter.CopyField( + 
parentPlan.selectNode.filter, + relatedField, + mapper.Field{Name: indField.Name, Index: ind}, + ), relatedField) + err := node.invertJoinDirectionWithIndex(fieldFilter, indField) + if err != nil { + return err + } + break + } + } + + return nil +} + +// expandTypeJoin does a plan graph expansion and other optimizations on invertibleTypeJoin. +func (p *Planner) expandTypeJoin(node *invertibleTypeJoin, parentPlan *selectTopNode) error { + if parentPlan.selectNode.filter == nil { + return p.expandPlan(node.subType, parentPlan) + } + + err := p.tryOptimizeJoinDirection(node, parentPlan) + if err != nil { + return err + } + + return p.expandPlan(node.subType, parentPlan) +} + func (p *Planner) expandGroupNodePlan(topNodeSelect *selectTopNode) error { var sourceNode planNode var hasJoinNode bool @@ -344,7 +413,7 @@ func (p *Planner) expandGroupNodePlan(topNodeSelect *selectTopNode) error { childSelect, pipe, false, - &topNodeSelect.selectNode.sourceInfo, + topNodeSelect.selectNode.collection, ) if err != nil { return err @@ -406,9 +475,9 @@ func (p *Planner) walkAndReplacePlan(planNode, target, replace planNode) error { case *selectNode: node.source = replace case *typeJoinOne: - node.root = replace + node.replaceRoot(replace) case *typeJoinMany: - node.root = replace + node.replaceRoot(replace) case *pipeNode: /* Do nothing - pipe nodes should not be replaced */ // @todo: add more nodes that apply here diff --git a/planner/scan.go b/planner/scan.go index 256711b34e..64a534da6d 100644 --- a/planner/scan.go +++ b/planner/scan.go @@ -11,12 +11,15 @@ package planner import ( + "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/db/base" "github.com/sourcenetwork/defradb/db/fetcher" "github.com/sourcenetwork/defradb/lens" + "github.com/sourcenetwork/defradb/planner/filter" "github.com/sourcenetwork/defradb/planner/mapper" "github.com/sourcenetwork/defradb/request/graphql/parser" ) @@ -35,8 +38,8 @@ type scanNode struct { documentIterator docMapper - p *Planner - desc client.CollectionDescription + p *Planner + col client.Collection fields []client.FieldDescription @@ -62,7 +65,7 @@ func (n *scanNode) Init() error { if err := n.fetcher.Init( n.p.ctx, n.p.txn, - &n.desc, + n.col, n.fields, n.filter, n.slct.DocumentMapping, @@ -74,8 +77,8 @@ func (n *scanNode) Init() error { return n.initScan() } -func (n *scanNode) initCollection(desc client.CollectionDescription) error { - n.desc = desc +func (n *scanNode) initCollection(col client.Collection) error { + n.col = col return n.initFields(n.slct.Fields) } @@ -90,7 +93,7 @@ func (n *scanNode) initFields(fields []mapper.Requestable) error { n.tryAddField(requestable.GetName()) // select might have its own select fields and filters fields case *mapper.Select: - n.tryAddField(requestable.Field.Name + "_id") // foreign key for type joins + n.tryAddField(requestable.Field.Name + request.RelatedObjectID) // foreign key for type joins err := n.initFields(requestable.Fields) if err != nil { return err @@ -101,7 +104,7 @@ func (n *scanNode) initFields(fields []mapper.Requestable) error { if target.Filter != nil { fieldDescs, err := parser.ParseFilterFieldsForDescription( target.Filter.ExternalConditions, - n.desc.Schema, + n.col.Schema(), ) if err != nil { return err @@ -122,7 +125,7 @@ func (n *scanNode) initFields(fields []mapper.Requestable) error { } func (n *scanNode) tryAddField(fieldName string) bool { - 
fd, ok := n.desc.Schema.GetField(fieldName) + fd, ok := n.col.Schema().GetField(fieldName) if !ok { // skip fields that are not part of the // schema description. The scanner (and fetcher) @@ -133,6 +136,32 @@ func (n *scanNode) tryAddField(fieldName string) bool { return true } +func (scan *scanNode) initFetcher( + cid immutable.Option[string], + indexedField immutable.Option[client.FieldDescription], +) { + var f fetcher.Fetcher + if cid.HasValue() { + f = new(fetcher.VersionedFetcher) + } else { + f = new(fetcher.DocumentFetcher) + + if indexedField.HasValue() { + typeIndex := scan.documentMapping.FirstIndexOfName(indexedField.Value().Name) + field := mapper.Field{Index: typeIndex, Name: indexedField.Value().Name} + var indexFilter *mapper.Filter + scan.filter, indexFilter = filter.SplitByField(scan.filter, field) + if indexFilter != nil { + fieldDesc, _ := scan.col.Schema().GetField(indexedField.Value().Name) + f = fetcher.NewIndexFetcher(f, fieldDesc, indexFilter) + } + } + + f = lens.NewFetcher(f, scan.p.db.LensRegistry()) + } + scan.fetcher = f +} + // Start starts the internal logic of the scanner // like the DocumentFetcher, and more. func (n *scanNode) Start() error { @@ -141,7 +170,7 @@ func (n *scanNode) Start() error { func (n *scanNode) initScan() error { if !n.spans.HasValue { - start := base.MakeCollectionKey(n.desc) + start := base.MakeCollectionKey(n.col.Description()) n.spans = core.NewSpans(core.NewSpan(start, start.PrefixEnd())) } @@ -223,8 +252,8 @@ func (n *scanNode) simpleExplain() (map[string]any, error) { } // Add the collection attributes. - simpleExplainMap[collectionNameLabel] = n.desc.Name - simpleExplainMap[collectionIDLabel] = n.desc.IDString() + simpleExplainMap[collectionNameLabel] = n.col.Name() + simpleExplainMap[collectionIDLabel] = n.col.Description().IDString() // Add the spans attribute. 
simpleExplainMap[spansLabel] = n.explainSpans() @@ -237,6 +266,7 @@ func (n *scanNode) executeExplain() map[string]any { "iterations": n.execInfo.iterations, "docFetches": n.execInfo.fetches.DocsFetched, "fieldFetches": n.execInfo.fetches.FieldsFetched, + "indexFetches": n.execInfo.fetches.IndexesFetched, } } @@ -258,26 +288,21 @@ func (n *scanNode) Explain(explainType request.ExplainType) (map[string]any, err // Merge implements mergeNode func (n *scanNode) Merge() bool { return true } -func (p *Planner) Scan(parsed *mapper.Select) (*scanNode, error) { - var f fetcher.Fetcher - if parsed.Cid.HasValue() { - f = new(fetcher.VersionedFetcher) - } else { - f = new(fetcher.DocumentFetcher) - f = lens.NewFetcher(f, p.db.LensRegistry()) - } +func (p *Planner) Scan( + mapperSelect *mapper.Select, + colDesc client.CollectionDescription, +) (*scanNode, error) { scan := &scanNode{ p: p, - fetcher: f, - slct: parsed, - docMapper: docMapper{parsed.DocumentMapping}, + slct: mapperSelect, + docMapper: docMapper{mapperSelect.DocumentMapping}, } - colDesc, err := p.getCollectionDesc(parsed.CollectionName) + col, err := p.db.GetCollectionByName(p.ctx, mapperSelect.CollectionName) if err != nil { return nil, err } - err = scan.initCollection(colDesc) + err = scan.initCollection(col) if err != nil { return nil, err } @@ -294,8 +319,6 @@ func (p *Planner) Scan(parsed *mapper.Select) (*scanNode, error) { // we call Next() on the underlying scanNode only // once every 2 Next() calls on the multiScan type multiScanNode struct { - docMapper - scanNode *scanNode numReaders int numCalls int @@ -349,6 +372,10 @@ func (n *multiScanNode) Close() error { return n.scanNode.Close() } +func (n *multiScanNode) DocumentMap() *core.DocumentMapping { + return n.scanNode.DocumentMap() +} + func (n *multiScanNode) addReader() { n.numReaders++ } diff --git a/planner/select.go b/planner/select.go index 4fb9b143f2..20c0dd43ba 100644 --- a/planner/select.go +++ b/planner/select.go @@ -14,6 +14,7 @@ import ( cid "github.com/ipfs/go-cid" "github.com/sourcenetwork/immutable" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/core" "github.com/sourcenetwork/defradb/db/base" @@ -101,9 +102,7 @@ type selectNode struct { // was created origSource planNode - // cache information about the original data source - // collection name, meta-data, etc. - sourceInfo sourceInfo + collection client.Collection // top level filter expression // filter is split between select, scan, and typeIndexJoin. 
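The scan.go hunk above introduces scanNode.initFetcher, which builds the fetcher stack decorator-style: a VersionedFetcher for CID-pinned reads, otherwise a DocumentFetcher that may be wrapped by an index fetcher (when the filter touches an indexed field) and is then wrapped by a lens fetcher for schema migrations. Below is a minimal, self-contained Go sketch of that composition pattern; every name in it (Fetcher, documentFetcher, indexFetcher, lensFetcher, buildFetcher) is an illustrative stand-in, not DefraDB's actual API.

// Sketch of the fetcher-composition pattern used by scanNode.initFetcher:
// start from a base fetcher, then wrap it with optional decorators.
package main

import "fmt"

type Fetcher interface{ Fetch() string }

// documentFetcher stands in for the plain full-scan fetcher.
type documentFetcher struct{}

func (documentFetcher) Fetch() string { return "full scan" }

// indexFetcher narrows the scan via an index before delegating to its inner fetcher.
type indexFetcher struct{ inner Fetcher }

func (f indexFetcher) Fetch() string { return "index lookup -> " + f.inner.Fetch() }

// lensFetcher applies schema migrations to whatever its inner fetcher yields.
type lensFetcher struct{ inner Fetcher }

func (f lensFetcher) Fetch() string { return f.inner.Fetch() + " -> lens migration" }

// buildFetcher mirrors the branching in initFetcher: wrap with the index
// decorator only when the filter touches an indexed field, and always wrap
// with the lens decorator last.
func buildFetcher(hasIndexedFilter bool) Fetcher {
	var f Fetcher = documentFetcher{}
	if hasIndexedFilter {
		f = indexFetcher{inner: f}
	}
	return lensFetcher{inner: f}
}

func main() {
	fmt.Println(buildFetcher(true).Fetch())  // index lookup -> full scan -> lens migration
	fmt.Println(buildFetcher(false).Fetch()) // full scan -> lens migration
}

The design choice the sketch illustrates: because each decorator only sees the Fetcher interface, the index optimization and the lens migration stay independent and can be toggled per scan without the scan node knowing which concrete fetcher it drives.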
@@ -244,16 +243,16 @@ func (n *selectNode) initSource() ([]aggregateNode, error) { } n.source = sourcePlan.plan n.origSource = sourcePlan.plan - n.sourceInfo = sourcePlan.info + n.collection = sourcePlan.collection // split filter // apply the root filter to the source // and rootSubType filters to the selectNode // @todo: simulate splitting for now - origScan, ok := n.source.(*scanNode) - if ok { - origScan.filter = n.filter + origScan, isScanNode := n.source.(*scanNode) + if isScanNode { origScan.showDeleted = n.selectReq.ShowDeleted + origScan.filter = n.filter n.filter = nil // If we have both a DocKey and a CID, then we need to run @@ -278,14 +277,39 @@ func (n *selectNode) initSource() ([]aggregateNode, error) { // instead of a prefix scan + filter via the Primary Index (0), like here: spans := make([]core.Span, len(n.selectReq.DocKeys.Value())) for i, docKey := range n.selectReq.DocKeys.Value() { - dockeyIndexKey := base.MakeDocKey(sourcePlan.info.collectionDescription, docKey) + dockeyIndexKey := base.MakeDocKey(sourcePlan.collection.Description(), docKey) spans[i] = core.NewSpan(dockeyIndexKey, dockeyIndexKey.PrefixEnd()) } origScan.Spans(core.NewSpans(spans...)) } } - return n.initFields(n.selectReq) + aggregates, err := n.initFields(n.selectReq) + if err != nil { + return nil, err + } + + if isScanNode { + origScan.initFetcher(n.selectReq.Cid, findFilteredByIndexedField(origScan)) + } + + return aggregates, nil +} + +func findFilteredByIndexedField(scanNode *scanNode) immutable.Option[client.FieldDescription] { + if scanNode.filter != nil { + schema := scanNode.col.Schema() + indexedFields := scanNode.col.Description().CollectIndexedFields(&schema) + for i := range indexedFields { + typeIndex := scanNode.documentMapping.FirstIndexOfName(indexedFields[i].Name) + if scanNode.filter.HasIndex(typeIndex) { + // we return the first found indexed field to keep it simple for now + // more sophisticated optimization logic can be added later + return immutable.Some(indexedFields[i]) + } + } + } + return immutable.None[client.FieldDescription]() } func (n *selectNode) initFields(selectReq *mapper.Select) ([]aggregateNode, error) { @@ -375,36 +399,11 @@ func (n *selectNode) addTypeIndexJoin(subSelect *mapper.Select) error { func (n *selectNode) Source() planNode { return n.source } -// func appendSource() {} - -// func (n *selectNode) initRender( -// fields []*client.FieldDescription, -// aliases []string, -//) error { -// return n.planner.render(fields, aliases) -// } - -// SubSelect is used for creating Select nodes used on sub selections, -// not to be used on the top level selection node. -// This allows us to disable rendering on all sub Select nodes -// and only run it at the end on the top level select node. 
-func (p *Planner) SubSelect(selectReq *mapper.Select) (planNode, error) { - plan, err := p.Select(selectReq) - if err != nil { - return nil, err - } - - // if this is a sub select plan, we need to remove the render node - // as the final top level selectTopNode will handle all sub renders - top := plan.(*selectTopNode) - return top, nil -} - func (p *Planner) SelectFromSource( selectReq *mapper.Select, source planNode, fromCollection bool, - providedSourceInfo *sourceInfo, + collection client.Collection, ) (planNode, error) { s := &selectNode{ planner: p, @@ -419,17 +418,17 @@ func (p *Planner) SelectFromSource( orderBy := selectReq.OrderBy groupBy := selectReq.GroupBy - if providedSourceInfo != nil { - s.sourceInfo = *providedSourceInfo + if collection != nil { + s.collection = collection } if fromCollection { - desc, err := p.getCollectionDesc(selectReq.Name) + col, err := p.db.GetCollectionByName(p.ctx, selectReq.Name) if err != nil { return nil, err } - s.sourceInfo = sourceInfo{desc} + s.collection = col } aggregates, err := s.initFields(selectReq) diff --git a/planner/sum.go b/planner/sum.go index 0e1690898e..85371e5a30 100644 --- a/planner/sum.go +++ b/planner/sum.go @@ -77,12 +77,12 @@ func (p *Planner) isValueFloat( } if !source.ChildTarget.HasValue { - parentDescription, err := p.getCollectionDesc(parent.CollectionName) + parentCol, err := p.db.GetCollectionByName(p.ctx, parent.CollectionName) if err != nil { return false, err } - fieldDescription, fieldDescriptionFound := parentDescription.Schema.GetField(source.Name) + fieldDescription, fieldDescriptionFound := parentCol.Schema().GetField(source.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.Name) } @@ -125,12 +125,12 @@ func (p *Planner) isValueFloat( return false, nil } - childCollectionDescription, err := p.getCollectionDesc(child.CollectionName) + childCol, err := p.db.GetCollectionByName(p.ctx, child.CollectionName) if err != nil { return false, err } - fieldDescription, fieldDescriptionFound := childCollectionDescription.Schema.GetField(source.ChildTarget.Name) + fieldDescription, fieldDescriptionFound := childCol.Schema().GetField(source.ChildTarget.Name) if !fieldDescriptionFound { return false, client.NewErrFieldNotExist(source.ChildTarget.Name) } diff --git a/planner/type_join.go b/planner/type_join.go index f37437089e..47ba07e96b 100644 --- a/planner/type_join.go +++ b/planner/type_join.go @@ -81,8 +81,7 @@ func (p *Planner) makeTypeIndexJoin( var joinPlan planNode var err error - desc := parent.sourceInfo.collectionDescription - typeFieldDesc, ok := desc.Schema.GetField(subType.Name) + typeFieldDesc, ok := parent.collection.Schema().GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -153,14 +152,14 @@ func (n *typeIndexJoin) simpleExplain() (map[string]any, error) { switch joinType := n.joinPlan.(type) { case *typeJoinOne: // Add the direction attribute. - if joinType.primary { - simpleExplainMap[joinDirectionLabel] = joinDirectionPrimaryLabel - } else { + if joinType.isSecondary { simpleExplainMap[joinDirectionLabel] = joinDirectionSecondaryLabel + } else { + simpleExplainMap[joinDirectionLabel] = joinDirectionPrimaryLabel } // Add the attribute(s). 
- simpleExplainMap[joinRootLabel] = joinType.subTypeFieldName + simpleExplainMap[joinRootLabel] = joinType.rootName simpleExplainMap[joinSubTypeNameLabel] = joinType.subTypeName subTypeExplainGraph, err := buildSimpleExplainGraph(joinType.subType) @@ -199,9 +198,24 @@ func (n *typeIndexJoin) Explain(explainType request.ExplainType) (map[string]any return n.simpleExplain() case request.ExecuteExplain: - return map[string]any{ + result := map[string]any{ "iterations": n.execInfo.iterations, - }, nil + } + var subScan *scanNode + if joinMany, isJoinMany := n.joinPlan.(*typeJoinMany); isJoinMany { + subScan = getScanNode(joinMany.subType) + } + if joinOne, isJoinOne := n.joinPlan.(*typeJoinOne); isJoinOne { + subScan = getScanNode(joinOne.subType) + } + if subScan != nil { + subScanExplain, err := subScan.Explain(explainType) + if err != nil { + return nil, err + } + result["subTypeScanNode"] = subScanExplain + } + return result, nil default: return nil, ErrUnknownExplainRequestType @@ -214,22 +228,7 @@ func (n *typeIndexJoin) Merge() bool { return true } // typeJoinOne is the plan node for a type index join // where the root type is the primary in a one-to-one relation request. type typeJoinOne struct { - documentIterator - docMapper - - p *Planner - - root planNode - subType planNode - - subTypeName string - subTypeFieldName string - - primary bool - secondaryFieldIndex immutable.Option[int] - - spans core.Spans - subSelect *mapper.Select + invertibleTypeJoin } func (p *Planner) makeTypeJoinOne( @@ -239,13 +238,13 @@ func (p *Planner) makeTypeJoinOne( ) (*typeJoinOne, error) { prepareScanNodeFilterForTypeJoin(parent, source, subType) - selectPlan, err := p.SubSelect(subType) + selectPlan, err := p.Select(subType) if err != nil { return nil, err } // get the correct sub field schema type (collection) - subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.Schema.GetField(subType.Name) + subTypeFieldDesc, ok := parent.collection.Schema().GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } @@ -254,12 +253,18 @@ func (p *Planner) makeTypeJoinOne( // check if the field we're querying is the primary side of the relation isPrimary := subTypeFieldDesc.RelationType.IsSet(client.Relation_Type_Primary) - subTypeCollectionDesc, err := p.getCollectionDesc(subType.CollectionName) + subTypeCol, err := p.db.GetCollectionByName(p.ctx, subType.CollectionName) if err != nil { return nil, err } + subTypeSchema := subTypeCol.Schema() - subTypeField, subTypeFieldNameFound := subTypeCollectionDesc.GetRelation(subTypeFieldDesc.RelationName) + subTypeField, subTypeFieldNameFound := subTypeCol.Description().GetFieldByRelation( + subTypeFieldDesc.RelationName, + parent.collection.Name(), + subTypeFieldDesc.Name, + &subTypeSchema, + ) if !subTypeFieldNameFound { return nil, client.NewErrFieldNotExist(subTypeFieldDesc.RelationName) } @@ -272,16 +277,26 @@ func (p *Planner) makeTypeJoinOne( ) } + dir := joinDirection{ + firstNode: source, + secondNode: selectPlan, + secondaryField: subTypeField.Name + request.RelatedObjectID, + primaryField: subTypeFieldDesc.Name + request.RelatedObjectID, + } + return &typeJoinOne{ - p: p, - root: source, - subSelect: subType, - subTypeName: subType.Name, - subTypeFieldName: subTypeField.Name, - subType: selectPlan, - primary: isPrimary, - secondaryFieldIndex: secondaryFieldIndex, - docMapper: docMapper{parent.documentMapping}, + invertibleTypeJoin: invertibleTypeJoin{ + docMapper: docMapper{parent.documentMapping}, + root: source, + subType: 
selectPlan, + subSelect: subType, + rootName: subTypeField.Name, + subTypeName: subType.Name, + isSecondary: !isPrimary, + secondaryFieldIndex: secondaryFieldIndex, + secondaryFetchLimit: 1, + dir: dir, + }, }, nil } @@ -289,139 +304,36 @@ func (n *typeJoinOne) Kind() string { return "typeJoinOne" } -func (n *typeJoinOne) Init() error { - if err := n.subType.Init(); err != nil { - return err - } - return n.root.Init() -} - -func (n *typeJoinOne) Start() error { - if err := n.subType.Start(); err != nil { - return err - } - return n.root.Start() -} - -func (n *typeJoinOne) Spans(spans core.Spans) { - n.root.Spans(spans) -} - -func (n *typeJoinOne) Next() (bool, error) { - hasNext, err := n.root.Next() - if err != nil || !hasNext { - return hasNext, err - } - - doc := n.root.Value() - if n.primary { - n.currentValue, err = n.valuesPrimary(doc) - } else { - n.currentValue, err = n.valuesSecondary(doc) - } - - if err != nil { - return false, err - } - - return true, nil -} - -func (n *typeJoinOne) valuesSecondary(doc core.Doc) (core.Doc, error) { - propIndex := n.subType.DocumentMap().FirstIndexOfName(n.subTypeFieldName + request.RelatedObjectID) - // using the doc._key as a filter - setSubTypeFilterToScanNode(n.subType, propIndex, doc.GetKey()) - - // We have to reset the scan node after appending the new key-filter - if err := n.subType.Init(); err != nil { - return doc, NewErrSubTypeInit(err) - } - - next, err := n.subType.Next() - if !next || err != nil { - return doc, err - } - - subDoc := n.subType.Value() - doc.Fields[n.subSelect.Index] = subDoc +func fetchDocsWithFieldValue(plan planNode, fieldName string, val any, limit uint) ([]core.Doc, error) { + propIndex := plan.DocumentMap().FirstIndexOfName(fieldName) + setSubTypeFilterToScanNode(plan, propIndex, val) - if n.secondaryFieldIndex.HasValue() { - doc.Fields[n.secondaryFieldIndex.Value()] = subDoc.GetKey() + if err := plan.Init(); err != nil { + return nil, NewErrSubTypeInit(err) } - return doc, nil -} - -func (n *typeJoinOne) valuesPrimary(doc core.Doc) (core.Doc, error) { - // get the subtype doc key - subDocKey := n.docMapper.documentMapping.FirstOfName(doc, n.subTypeName+request.RelatedObjectID) - - subDocKeyStr, ok := subDocKey.(string) - if !ok { - return doc, nil - } - - // create the collection key for the sub doc - slct := n.subType.(*selectTopNode).selectNode - desc := slct.sourceInfo.collectionDescription - subKeyIndexKey := base.MakeDocKey(desc, subDocKeyStr) - - // reset span - n.spans = core.NewSpans(core.NewSpan(subKeyIndexKey, subKeyIndexKey.PrefixEnd())) - - // do a point lookup with the new span (index key) - n.subType.Spans(n.spans) - - // re-initialize the sub type plan - if err := n.subType.Init(); err != nil { - return doc, NewErrSubTypeInit(err) - } - - // if we don't find any docs from our point span lookup - // or if we encounter an error just return the base doc, - // with an empty map for the subDoc - next, err := n.subType.Next() + docs := make([]core.Doc, 0, limit) + for { + next, err := plan.Next() + if err != nil { + return nil, err + } + if !next { + break + } - if err != nil { - return doc, err - } + docs = append(docs, plan.Value()) - if !next { - return doc, nil + if limit > 0 && len(docs) >= int(limit) { + break + } } - subDoc := n.subType.Value() - doc.Fields[n.subSelect.Index] = subDoc - - return doc, nil -} - -func (n *typeJoinOne) Close() error { - err := n.root.Close() - if err != nil { - return err - } - return n.subType.Close() + return docs, nil } -func (n *typeJoinOne) Source() planNode { 
return n.root } - type typeJoinMany struct { - documentIterator - docMapper - - p *Planner - - // the main type that is at the parent level of the request. - root planNode - rootName string - // the index to use to gather the subtype IDs - index *scanNode - // the subtype plan to get the subtype docs - subType planNode - subTypeName string - - subSelect *mapper.Select + invertibleTypeJoin } func prepareScanNodeFilterForTypeJoin( @@ -466,34 +378,52 @@ func (p *Planner) makeTypeJoinMany( ) (*typeJoinMany, error) { prepareScanNodeFilterForTypeJoin(parent, source, subType) - selectPlan, err := p.SubSelect(subType) + selectPlan, err := p.Select(subType) if err != nil { return nil, err } - subTypeFieldDesc, ok := parent.sourceInfo.collectionDescription.Schema.GetField(subType.Name) + subTypeFieldDesc, ok := parent.collection.Schema().GetField(subType.Name) if !ok { return nil, client.NewErrFieldNotExist(subType.Name) } - subTypeCollectionDesc, err := p.getCollectionDesc(subType.CollectionName) + subTypeCol, err := p.db.GetCollectionByName(p.ctx, subType.CollectionName) if err != nil { return nil, err } + subTypeSchema := subTypeCol.Schema() + + rootField, rootNameFound := subTypeCol.Description().GetFieldByRelation( + subTypeFieldDesc.RelationName, + parent.collection.Name(), + subTypeFieldDesc.Name, + &subTypeSchema, + ) - rootField, rootNameFound := subTypeCollectionDesc.GetRelation(subTypeFieldDesc.RelationName) if !rootNameFound { return nil, client.NewErrFieldNotExist(subTypeFieldDesc.RelationName) } + dir := joinDirection{ + firstNode: source, + secondNode: selectPlan, + secondaryField: rootField.Name + request.RelatedObjectID, + primaryField: subTypeFieldDesc.Name + request.RelatedObjectID, + } + return &typeJoinMany{ - p: p, - root: source, - subSelect: subType, - subTypeName: subType.Name, - rootName: rootField.Name, - subType: selectPlan, - docMapper: docMapper{parent.documentMapping}, + invertibleTypeJoin: invertibleTypeJoin{ + docMapper: docMapper{parent.documentMapping}, + root: source, + subType: selectPlan, + subSelect: subType, + rootName: rootField.Name, + isSecondary: true, + subTypeName: subType.Name, + secondaryFetchLimit: 0, + dir: dir, + }, }, nil } @@ -501,77 +431,194 @@ func (n *typeJoinMany) Kind() string { return "typeJoinMany" } -func (n *typeJoinMany) Init() error { - if err := n.subType.Init(); err != nil { - return err +func fetchPrimaryDoc(node, subNode planNode, parentProp string) (bool, error) { + subDoc := subNode.Value() + ind := subNode.DocumentMap().FirstIndexOfName(parentProp) + + docKeyStr, isStr := subDoc.Fields[ind].(string) + if !isStr { + return false, nil + } + + scan := getScanNode(node) + if scan == nil { + return false, nil + } + rootDocKey := base.MakeDocKey(scan.col.Description(), docKeyStr) + + spans := core.NewSpans(core.NewSpan(rootDocKey, rootDocKey.PrefixEnd())) + + node.Spans(spans) + + if err := node.Init(); err != nil { + return false, NewErrSubTypeInit(err) + } + + hasValue, err := node.Next() + + if err != nil || !hasValue { + return false, err + } + + return true, nil +} + +type joinDirection struct { + firstNode planNode + secondNode planNode + secondaryField string + primaryField string + isInverted bool +} + +func (dir *joinDirection) invert() { + dir.isInverted = !dir.isInverted + dir.firstNode, dir.secondNode = dir.secondNode, dir.firstNode + dir.secondaryField, dir.primaryField = dir.primaryField, dir.secondaryField +} + +type invertibleTypeJoin struct { + documentIterator + docMapper + + root planNode + subType planNode + 
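+	// root is the plan for the parent side of the relation and subType the
+	// plan for the related side; which of the two actually drives iteration
+	// is determined by dir (see joinDirection.invert below).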
rootName string + subTypeName string + + subSelect *mapper.Select + + isSecondary bool + secondaryFieldIndex immutable.Option[int] + secondaryFetchLimit uint + + dir joinDirection +} + +func (join *invertibleTypeJoin) replaceRoot(node planNode) { + join.root = node + if join.dir.isInverted { + join.dir.secondNode = node + } else { + join.dir.firstNode = node } - return n.root.Init() } -func (n *typeJoinMany) Start() error { - if err := n.subType.Start(); err != nil { +func (join *invertibleTypeJoin) Init() error { + if err := join.subType.Init(); err != nil { return err } - return n.root.Start() + return join.root.Init() } -func (n *typeJoinMany) Spans(spans core.Spans) { - n.root.Spans(spans) +func (join *invertibleTypeJoin) Start() error { + if err := join.subType.Start(); err != nil { + return err + } + return join.root.Start() } -func (n *typeJoinMany) Next() (bool, error) { - hasNext, err := n.root.Next() - if err != nil || !hasNext { - return hasNext, err +func (join *invertibleTypeJoin) Close() error { + if err := join.root.Close(); err != nil { + return err } - n.currentValue = n.root.Value() + return join.subType.Close() +} + +func (join *invertibleTypeJoin) Spans(spans core.Spans) { + join.root.Spans(spans) +} - // check if theres an index - // if there is, scan and aggregate results - // if not, then manually scan the subtype table - subDocs := make([]core.Doc, 0) - if n.index != nil { - // @todo: handle index for one-to-many setup +func (join *invertibleTypeJoin) Source() planNode { return join.root } + +func (tj *invertibleTypeJoin) invert() { + tj.dir.invert() + tj.isSecondary = !tj.isSecondary +} + +func (join *invertibleTypeJoin) processSecondResult(secondDocs []core.Doc) (any, any) { + var secondResult any + var secondIDResult any + if join.secondaryFetchLimit == 1 { + if len(secondDocs) != 0 { + secondResult = secondDocs[0] + secondIDResult = secondDocs[0].GetKey() + } } else { - propIndex := n.subSelect.FirstIndexOfName(n.rootName + request.RelatedObjectID) - // using the doc._key as a filter - setSubTypeFilterToScanNode(n.subType, propIndex, n.currentValue.GetKey()) + secondResult = secondDocs + secondDocKeys := make([]string, len(secondDocs)) + for i, doc := range secondDocs { + secondDocKeys[i] = doc.GetKey() + } + secondIDResult = secondDocKeys + } + join.root.Value().Fields[join.subSelect.Index] = secondResult + if join.secondaryFieldIndex.HasValue() { + join.root.Value().Fields[join.secondaryFieldIndex.Value()] = secondIDResult + } + return secondResult, secondIDResult +} + +func (join *invertibleTypeJoin) Next() (bool, error) { + hasFirstValue, err := join.dir.firstNode.Next() + + if err != nil || !hasFirstValue { + return false, err + } - // reset scan node - if err := n.subType.Init(); err != nil { + firstDoc := join.dir.firstNode.Value() + + if join.isSecondary { + secondDocs, err := fetchDocsWithFieldValue( + join.dir.secondNode, + join.dir.secondaryField, + firstDoc.GetKey(), + join.secondaryFetchLimit, + ) + if err != nil { return false, err } - - for { - next, err := n.subType.Next() - if err != nil { - return false, err - } - if !next { - break + if join.dir.secondNode == join.root { + join.root.Value().Fields[join.subSelect.Index] = join.subType.Value() + } else { + secondResult, secondIDResult := join.processSecondResult(secondDocs) + join.dir.firstNode.Value().Fields[join.subSelect.Index] = secondResult + if join.secondaryFieldIndex.HasValue() { + join.dir.firstNode.Value().Fields[join.secondaryFieldIndex.Value()] = secondIDResult } + } + } else { + 
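+		// Primary side of the relation: the doc just read from firstNode
+		// holds the related document's key, so fetchPrimaryDoc resolves it
+		// with a single point lookup instead of a filtered scan.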
hasDoc, err := fetchPrimaryDoc(join.dir.secondNode, join.dir.firstNode, join.dir.primaryField) + if err != nil { + return false, err + } - subDoc := n.subType.Value() - subDocs = append(subDocs, subDoc) + if hasDoc { + join.root.Value().Fields[join.subSelect.Index] = join.subType.Value() } } - n.currentValue.Fields[n.subSelect.Index] = subDocs + join.currentValue = join.root.Value() + return true, nil } -func (n *typeJoinMany) Close() error { - if err := n.root.Close(); err != nil { - return err - } +func (join *invertibleTypeJoin) invertJoinDirectionWithIndex( + fieldFilter *mapper.Filter, + field client.FieldDescription, +) error { + subScan := getScanNode(join.subType) + subScan.tryAddField(join.rootName + request.RelatedObjectID) + subScan.filter = fieldFilter + subScan.initFetcher(immutable.Option[string]{}, immutable.Some(field)) - return n.subType.Close() -} + join.invert() -func (n *typeJoinMany) Source() planNode { return n.root } + return nil +} -func setSubTypeFilterToScanNode(plan planNode, propIndex int, key string) { +func setSubTypeFilterToScanNode(plan planNode, propIndex int, val any) { scan := getScanNode(plan) if scan == nil { return @@ -584,7 +631,7 @@ func setSubTypeFilterToScanNode(plan planNode, propIndex int, key string) { propertyIndex := &mapper.PropertyIndex{Index: propIndex} filterConditions := map[connor.FilterKey]any{ propertyIndex: map[connor.FilterKey]any{ - mapper.FilterEqOp: key, + mapper.FilterEqOp: val, }, } @@ -600,6 +647,11 @@ func getScanNode(plan planNode) *scanNode { return scanNode } node = node.Source() + if node == nil { + if topSelect, ok := plan.(*selectTopNode); ok { + node = topSelect.selectNode + } + } } return nil } diff --git a/playground/package-lock.json b/playground/package-lock.json index 952d577128..f3252de4aa 100644 --- a/playground/package-lock.json +++ b/playground/package-lock.json @@ -8,25 +8,24 @@ "name": "playground", "version": "0.0.0", "dependencies": { - "@tanstack/react-query": "^4.35.3", - "fast-json-patch": "^3.1.1", - "graphiql": "^3.0.5", - "graphql": "^16.8.0", + "graphiql": "^3.0.9", + "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", - "react-hook-form": "^7.46.1" + "swagger-ui-react": "^5.9.4" }, "devDependencies": { - "@types/react": "^18.2.21", - "@types/react-dom": "^18.2.7", - "@typescript-eslint/eslint-plugin": "^6.7.0", - "@typescript-eslint/parser": "^6.7.0", - "@vitejs/plugin-react-swc": "^3.0.0", - "eslint": "^8.49.0", + "@types/react": "^18.2.37", + "@types/react-dom": "^18.2.15", + "@types/swagger-ui-react": "^4.18.1", + "@typescript-eslint/eslint-plugin": "^6.10.0", + "@typescript-eslint/parser": "^6.11.0", + "@vitejs/plugin-react-swc": "^3.4.1", + "eslint": "^8.53.0", "eslint-plugin-react-hooks": "^4.6.0", - "eslint-plugin-react-refresh": "^0.4.3", + "eslint-plugin-react-refresh": "^0.4.4", "typescript": "^5.2.2", - "vite": "^4.4.9" + "vite": "^4.5.0" } }, "node_modules/@aashutoshrathi/word-wrap": { @@ -39,16 +38,33 @@ } }, "node_modules/@babel/runtime": { - "version": "7.22.6", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.6.tgz", - "integrity": "sha512-wDb5pWm4WDdF6LFUde3Jl8WzPA+3ZbxYqkC6xAXuD3irdEHN1k0NfTRrJD8ZD378SJ61miMLCqIOXYhd8x+AJQ==", + "version": "7.22.15", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.22.15.tgz", + "integrity": "sha512-T0O+aa+4w0u06iNmapipJXMV4HoUir03hpx3/YqXXhu9xim3w+dVphjFWl1OH8NbZHw5Lbm9k45drDkgq2VNNA==", "dependencies": { - "regenerator-runtime": "^0.13.11" + "regenerator-runtime": "^0.14.0" }, "engines": { 
"node": ">=6.9.0" } }, + "node_modules/@babel/runtime-corejs3": { + "version": "7.23.2", + "resolved": "https://registry.npmjs.org/@babel/runtime-corejs3/-/runtime-corejs3-7.23.2.tgz", + "integrity": "sha512-54cIh74Z1rp4oIjsHjqN+WM4fMyCBYe+LpZ9jWm51CZ1fbH3SkAzQD/3XLoNkjbJ7YEmjobLXyvQrFypRHOrXw==", + "dependencies": { + "core-js-pure": "^3.30.2", + "regenerator-runtime": "^0.14.0" + }, + "engines": { + "node": ">=6.9.0" + } + }, + "node_modules/@braintree/sanitize-url": { + "version": "6.0.4", + "resolved": "https://registry.npmjs.org/@braintree/sanitize-url/-/sanitize-url-6.0.4.tgz", + "integrity": "sha512-s3jaWicZd0pkP0jf5ysyHUI/RE7MHos6qlToFcGWXVp+ykHOy77OUMrfbgJ9it2C5bow7OIQwYYaHjk9XlBQ2A==" + }, "node_modules/@codemirror/language": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.0.0.tgz", @@ -64,19 +80,19 @@ } }, "node_modules/@codemirror/state": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.2.1.tgz", - "integrity": "sha512-RupHSZ8+OjNT38zU9fKH2sv+Dnlr8Eb8sl4NOnnqz95mCFTZUaiRP8Xv5MeeaG0px2b8Bnfe7YGwCV3nsBhbuw==", + "version": "6.3.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.3.1.tgz", + "integrity": "sha512-88e4HhMtKJyw6fKprGaN/yZfiaoGYOi2nM45YCUC6R/kex9sxFWBDGatS1vk4lMgnWmdIIB9tk8Gj1LmL8YfvA==", "peer": true }, "node_modules/@codemirror/view": { - "version": "6.16.0", - "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.16.0.tgz", - "integrity": "sha512-1Z2HkvkC3KR/oEZVuW9Ivmp8TWLzGEd8T8TA04TTwPvqogfkHBdYSlflytDOqmkUxM2d1ywTg7X2dU5mC+SXvg==", + "version": "6.22.0", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.22.0.tgz", + "integrity": "sha512-6zLj4YIoIpfTGKrDMTbeZRpa8ih4EymMCKmddEDcJWrCdp/N1D46B38YEz4creTb4T177AVS9EyXkLeC/HL2jA==", "peer": true, "dependencies": { "@codemirror/state": "^6.1.4", - "style-mod": "^4.0.0", + "style-mod": "^4.1.0", "w3c-keyname": "^2.2.4" } }, @@ -472,9 +488,9 @@ } }, "node_modules/@eslint/eslintrc": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.2.tgz", - "integrity": "sha512-+wvgpDsrB1YqAMdEUCcnTlpfVBH7Vqn6A/NT3D8WVXFIaKMlErPIZT3oCIAVCOtarRpMtelZLqJeU3t7WY6X6g==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.3.tgz", + "integrity": "sha512-yZzuIG+jnVu6hNSzFEN07e8BxF3uAzYtQb6uDkaYZLo6oYZDCq454c5kB8zxnzfCYyP4MIuyBn10L0DqwujTmA==", "dev": true, "dependencies": { "ajv": "^6.12.4", @@ -495,37 +511,45 @@ } }, "node_modules/@eslint/js": { - "version": "8.49.0", - "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.49.0.tgz", - "integrity": "sha512-1S8uAY/MTJqVx0SC4epBq+N2yhuwtNwLbJYNZyhL2pO1ZVKn5HFXav5T41Ryzy9K9V7ZId2JB2oy/W4aCd9/2w==", + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.53.0.tgz", + "integrity": "sha512-Kn7K8dx/5U6+cT1yEhpX1w4PCSg0M+XyRILPgvwcEBjerFWCwQj5sbr3/VmxqV0JGHCBCzyd6LxypEuehypY1w==", "dev": true, "engines": { "node": "^12.22.0 || ^14.17.0 || >=16.0.0" } }, + "node_modules/@fastify/busboy": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@fastify/busboy/-/busboy-2.1.0.tgz", + "integrity": "sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==", + "engines": { + "node": ">=14" + } + }, "node_modules/@floating-ui/core": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.4.1.tgz", - "integrity": 
"sha512-jk3WqquEJRlcyu7997NtR5PibI+y5bi+LS3hPmguVClypenMsCY3CBa3LAQnozRCtCrYWSEtAdiskpamuJRFOQ==", + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@floating-ui/core/-/core-1.5.0.tgz", + "integrity": "sha512-kK1h4m36DQ0UHGj5Ah4db7R0rHemTqqO0QLvUqi1/mUUp3LuAWbWxdxSIf/XsnH9VS6rRVPLJCncjRzUvyCLXg==", "dependencies": { - "@floating-ui/utils": "^0.1.1" + "@floating-ui/utils": "^0.1.3" } }, "node_modules/@floating-ui/dom": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.5.1.tgz", - "integrity": "sha512-KwvVcPSXg6mQygvA1TjbN/gh///36kKtllIF8SUm0qpFj8+rvYrpvlYdL1JoA71SHpDqgSSdGOSoQ0Mp3uY5aw==", + "version": "1.5.3", + "resolved": "https://registry.npmjs.org/@floating-ui/dom/-/dom-1.5.3.tgz", + "integrity": "sha512-ClAbQnEqJAKCJOEbbLo5IUlZHkNszqhuxS4fHAVxRPXPya6Ysf2G8KypnYcOTpx6I8xcgF9bbHb6g/2KpbV8qA==", "dependencies": { - "@floating-ui/core": "^1.4.1", - "@floating-ui/utils": "^0.1.1" + "@floating-ui/core": "^1.4.2", + "@floating-ui/utils": "^0.1.3" } }, "node_modules/@floating-ui/react-dom": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.1.tgz", - "integrity": "sha512-rZtAmSht4Lry6gdhAJDrCp/6rKN7++JnL1/Anbr/DdeyYXQPxvg/ivrbYvJulbRf4vL8b212suwMM2lxbv+RQA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/@floating-ui/react-dom/-/react-dom-2.0.2.tgz", + "integrity": "sha512-5qhlDvjaLmAst/rKb3VdlCinwTF4EYMiVxuuc/HVUjs46W0zgtbMmAZ1UTsDrRTxRmUEzl92mOtWbeeXL26lSQ==", "dependencies": { - "@floating-ui/dom": "^1.3.0" + "@floating-ui/dom": "^1.5.1" }, "peerDependencies": { "react": ">=16.8.0", @@ -533,14 +557,14 @@ } }, "node_modules/@floating-ui/utils": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.1.1.tgz", - "integrity": "sha512-m0G6wlnhm/AX0H12IOWtK8gASEMffnX08RtKkCgTdHb9JpHKGloI7icFfLg9ZmQeavcvR0PKmzxClyuFPSjKWw==" + "version": "0.1.6", + "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.1.6.tgz", + "integrity": "sha512-OfX7E2oUDYxtBvsuS4e/jSn4Q9Qb6DzgeYtsAdkPZ47znpoNsMgZw0+tVijiv3uGNR6dgNlty6r9rzIzHjtd/A==" }, "node_modules/@graphiql/react": { - "version": "0.19.3", - "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.19.3.tgz", - "integrity": "sha512-rpxKcmKPhyGfZo1w9h3+E5FY+LXOn8o5fJxpJd2MbLF8segvvWLtJeXL46Q2IkEFqR4uxf00NUTbCwXjRIVaQQ==", + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/@graphiql/react/-/react-0.20.2.tgz", + "integrity": "sha512-/crAUlM+4iVHyNHVdiZjsTEqfMXBHfjEvrMwCwTVig6YXmCAVuaxqkD7NlDtrrPQArLGkABmf1Nw7ObRpby5lg==", "dependencies": { "@graphiql/toolkit": "^0.9.1", "@headlessui/react": "^1.7.15", @@ -551,10 +575,10 @@ "@types/codemirror": "^5.60.8", "clsx": "^1.2.1", "codemirror": "^5.65.3", - "codemirror-graphql": "^2.0.9", + "codemirror-graphql": "^2.0.10", "copy-to-clipboard": "^3.2.0", "framer-motion": "^6.5.1", - "graphql-language-service": "^5.1.7", + "graphql-language-service": "^5.2.0", "markdown-it": "^12.2.0", "set-value": "^4.1.0" }, @@ -583,9 +607,9 @@ } }, "node_modules/@headlessui/react": { - "version": "1.7.16", - "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.16.tgz", - "integrity": "sha512-2MphIAZdSUacZBT6EXk8AJkj+EuvaaJbtCyHTJrPsz8inhzCl7qeNPI1uk1AUvCgWylVtdN8cVVmnhUDPxPy3g==", + "version": "1.7.17", + "resolved": "https://registry.npmjs.org/@headlessui/react/-/react-1.7.17.tgz", + "integrity": "sha512-4am+tzvkqDSSgiwrsEpGWqgGo9dz8qU5M3znCkC4PgkpY4HcCZzEDEvozltGGGHIKl9jbXbZPSH5TWn4sWJdow==", "dependencies": { 
"client-only": "^0.0.1" }, @@ -598,12 +622,12 @@ } }, "node_modules/@humanwhocodes/config-array": { - "version": "0.11.11", - "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.11.tgz", - "integrity": "sha512-N2brEuAadi0CcdeMXUkhbZB84eskAc8MEX1By6qEchoVywSgXPIjou4rYsl0V3Hj0ZnuGycGCjdNgockbzeWNA==", + "version": "0.11.13", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.13.tgz", + "integrity": "sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==", "dev": true, "dependencies": { - "@humanwhocodes/object-schema": "^1.2.1", + "@humanwhocodes/object-schema": "^2.0.1", "debug": "^4.1.1", "minimatch": "^3.0.5" }, @@ -625,15 +649,15 @@ } }, "node_modules/@humanwhocodes/object-schema": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-1.2.1.tgz", - "integrity": "sha512-ZnQMnLV4e7hDlUvw8H+U8ASL02SS2Gn6+9Ac3wGGLIe7+je2AeAOxPY+izIPJDfFDb7eDjev0Us8MO1iFRN8hA==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.1.tgz", + "integrity": "sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==", "dev": true }, "node_modules/@lezer/common": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.0.3.tgz", - "integrity": "sha512-JH4wAXCgUOcCGNekQPLhVeUtIqjH0yPBs7vvUdSjyQama9618IOKFJwkv2kcqdhF0my8hQEgCTEJU0GIgnahvA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.1.0.tgz", + "integrity": "sha512-XPIN3cYDXsoJI/oDWoR2tD++juVrhgIago9xyKhZ7IhGlzdDM9QgC8D8saKNCz5pindGcznFr2HBSsEQSWnSjw==", "peer": true }, "node_modules/@lezer/highlight": { @@ -646,30 +670,25 @@ } }, "node_modules/@lezer/lr": { - "version": "1.3.9", - "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.9.tgz", - "integrity": "sha512-XPz6dzuTHlnsbA5M2DZgjflNQ+9Hi5Swhic0RULdp3oOs3rh6bqGZolosVqN/fQIT8uNiepzINJDnS39oweTHQ==", + "version": "1.3.14", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.3.14.tgz", + "integrity": "sha512-z5mY4LStlA3yL7aHT/rqgG614cfcvklS+8oFRFBYrs4YaWLJyKKM4+nN6KopToX0o9Hj6zmH6M5kinOYuy06ug==", "peer": true, "dependencies": { "@lezer/common": "^1.0.0" } }, "node_modules/@motionone/animation": { - "version": "10.15.1", - "resolved": "https://registry.npmjs.org/@motionone/animation/-/animation-10.15.1.tgz", - "integrity": "sha512-mZcJxLjHor+bhcPuIFErMDNyrdb2vJur8lSfMCsuCB4UyV8ILZLvK+t+pg56erv8ud9xQGK/1OGPt10agPrCyQ==", + "version": "10.16.3", + "resolved": "https://registry.npmjs.org/@motionone/animation/-/animation-10.16.3.tgz", + "integrity": "sha512-QUGWpLbMFLhyqKlngjZhjtxM8IqiJQjLK0DF+XOF6od9nhSvlaeEpOY/UMCRVcZn/9Tr2rZO22EkuCIjYdI74g==", "dependencies": { - "@motionone/easing": "^10.15.1", - "@motionone/types": "^10.15.1", - "@motionone/utils": "^10.15.1", + "@motionone/easing": "^10.16.3", + "@motionone/types": "^10.16.3", + "@motionone/utils": "^10.16.3", "tslib": "^2.3.1" } }, - "node_modules/@motionone/animation/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@motionone/dom": { "version": "10.12.0", "resolved": "https://registry.npmjs.org/@motionone/dom/-/dom-10.12.0.tgz", @@ -683,60 +702,40 @@ "tslib": "^2.3.1" } }, - 
"node_modules/@motionone/dom/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@motionone/easing": { - "version": "10.15.1", - "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.15.1.tgz", - "integrity": "sha512-6hIHBSV+ZVehf9dcKZLT7p5PEKHGhDwky2k8RKkmOvUoYP3S+dXsKupyZpqx5apjd9f+php4vXk4LuS+ADsrWw==", + "version": "10.16.3", + "resolved": "https://registry.npmjs.org/@motionone/easing/-/easing-10.16.3.tgz", + "integrity": "sha512-HWTMZbTmZojzwEuKT/xCdvoMPXjYSyQvuVM6jmM0yoGU6BWzsmYMeB4bn38UFf618fJCNtP9XeC/zxtKWfbr0w==", "dependencies": { - "@motionone/utils": "^10.15.1", + "@motionone/utils": "^10.16.3", "tslib": "^2.3.1" } }, - "node_modules/@motionone/easing/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@motionone/generators": { - "version": "10.15.1", - "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.15.1.tgz", - "integrity": "sha512-67HLsvHJbw6cIbLA/o+gsm7h+6D4Sn7AUrB/GPxvujse1cGZ38F5H7DzoH7PhX+sjvtDnt2IhFYF2Zp1QTMKWQ==", + "version": "10.16.4", + "resolved": "https://registry.npmjs.org/@motionone/generators/-/generators-10.16.4.tgz", + "integrity": "sha512-geFZ3w0Rm0ZXXpctWsSf3REGywmLLujEjxPYpBR0j+ymYwof0xbV6S5kGqqsDKgyWKVWpUInqQYvQfL6fRbXeg==", "dependencies": { - "@motionone/types": "^10.15.1", - "@motionone/utils": "^10.15.1", + "@motionone/types": "^10.16.3", + "@motionone/utils": "^10.16.3", "tslib": "^2.3.1" } }, - "node_modules/@motionone/generators/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@motionone/types": { - "version": "10.15.1", - "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.15.1.tgz", - "integrity": "sha512-iIUd/EgUsRZGrvW0jqdst8st7zKTzS9EsKkP+6c6n4MPZoQHwiHuVtTQLD6Kp0bsBLhNzKIBlHXponn/SDT4hA==" + "version": "10.16.3", + "resolved": "https://registry.npmjs.org/@motionone/types/-/types-10.16.3.tgz", + "integrity": "sha512-W4jkEGFifDq73DlaZs3HUfamV2t1wM35zN/zX7Q79LfZ2sc6C0R1baUHZmqc/K5F3vSw3PavgQ6HyHLd/MXcWg==" }, "node_modules/@motionone/utils": { - "version": "10.15.1", - "resolved": "https://registry.npmjs.org/@motionone/utils/-/utils-10.15.1.tgz", - "integrity": "sha512-p0YncgU+iklvYr/Dq4NobTRdAPv9PveRDUXabPEeOjBLSO/1FNB2phNTZxOxpi1/GZwYpAoECEa0Wam+nsmhSw==", + "version": "10.16.3", + "resolved": "https://registry.npmjs.org/@motionone/utils/-/utils-10.16.3.tgz", + "integrity": "sha512-WNWDksJIxQkaI9p9Z9z0+K27xdqISGNFy1SsWVGaiedTHq0iaT6iZujby8fT/ZnZxj1EOaxJtSfUPCFNU5CRoA==", "dependencies": { - "@motionone/types": "^10.15.1", + "@motionone/types": "^10.16.3", "hey-listen": "^1.0.8", "tslib": "^2.3.1" } }, - "node_modules/@motionone/utils/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/@n1ru4l/push-pull-async-iterable-iterator": { "version": "3.2.0", "resolved": 
"https://registry.npmjs.org/@n1ru4l/push-pull-async-iterable-iterator/-/push-pull-async-iterable-iterator-3.2.0.tgz", @@ -872,19 +871,19 @@ } }, "node_modules/@radix-ui/react-dialog": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.0.4.tgz", - "integrity": "sha512-hJtRy/jPULGQZceSAP2Re6/4NpKo8im6V8P2hUqZsdFiSL8l35kYsw3qbRI6Ay5mQd2+wlLqje770eq+RJ3yZg==", + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dialog/-/react-dialog-1.0.5.tgz", + "integrity": "sha512-GjWJX/AUpB703eEBanuBnIWdIXg6NvJFCXcNlSZk4xdszCdhrJgBoUd1cGk67vFO+WdA2pfI/plOpqz/5GUP6Q==", "dependencies": { "@babel/runtime": "^7.13.10", "@radix-ui/primitive": "1.0.1", "@radix-ui/react-compose-refs": "1.0.1", "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-dismissable-layer": "1.0.5", "@radix-ui/react-focus-guards": "1.0.1", - "@radix-ui/react-focus-scope": "1.0.3", + "@radix-ui/react-focus-scope": "1.0.4", "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-portal": "1.0.4", "@radix-ui/react-presence": "1.0.1", "@radix-ui/react-primitive": "1.0.3", "@radix-ui/react-slot": "1.0.2", @@ -925,9 +924,9 @@ } }, "node_modules/@radix-ui/react-dismissable-layer": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.4.tgz", - "integrity": "sha512-7UpBa/RKMoHJYjie1gkF1DlK8l1fdU/VKDpoS3rCCo8YBJR294GwcEHyxHw72yvphJ7ld0AXEcSLAzY2F/WyCg==", + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dismissable-layer/-/react-dismissable-layer-1.0.5.tgz", + "integrity": "sha512-aJeDjQhywg9LBu2t/At58hCvr7pEm0o2Ke1x33B+MhjNmmZ17sy4KImo0KPLgsnc/zN7GPdce8Cnn0SWvwZO7g==", "dependencies": { "@babel/runtime": "^7.13.10", "@radix-ui/primitive": "1.0.1", @@ -952,16 +951,16 @@ } }, "node_modules/@radix-ui/react-dropdown-menu": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.0.5.tgz", - "integrity": "sha512-xdOrZzOTocqqkCkYo8yRPCib5OkTkqN7lqNCdxwPOdE466DOaNl4N8PkUIlsXthQvW5Wwkd+aEmWpfWlBoDPEw==", + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-dropdown-menu/-/react-dropdown-menu-2.0.6.tgz", + "integrity": "sha512-i6TuFOoWmLWq+M/eCLGd/bQ2HfAX1RJgvrBQ6AQLmzfvsLdefxbWu8G9zczcPFfcSPehz9GcpF6K9QYreFV8hA==", "dependencies": { "@babel/runtime": "^7.13.10", "@radix-ui/primitive": "1.0.1", "@radix-ui/react-compose-refs": "1.0.1", "@radix-ui/react-context": "1.0.1", "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-menu": "2.0.5", + "@radix-ui/react-menu": "2.0.6", "@radix-ui/react-primitive": "1.0.3", "@radix-ui/react-use-controllable-state": "1.0.1" }, @@ -998,9 +997,9 @@ } }, "node_modules/@radix-ui/react-focus-scope": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.0.3.tgz", - "integrity": "sha512-upXdPfqI4islj2CslyfUBNlaJCPybbqRHAi1KER7Isel9Q2AtSJ0zRBZv8mWQiFXD2nyAJ4BhC3yXgZ6kMBSrQ==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-focus-scope/-/react-focus-scope-1.0.4.tgz", + "integrity": "sha512-sL04Mgvf+FmyvZeYfNu1EPAaaxD+aw7cYeIB9L9Fvq8+urhltTRaEo5ysKOpHuKPclsZcSUMKlN05x4u+CINpA==", "dependencies": { "@babel/runtime": "^7.13.10", "@radix-ui/react-compose-refs": "1.0.1", @@ -1041,9 +1040,9 @@ } }, "node_modules/@radix-ui/react-menu": { - "version": "2.0.5", - "resolved": 
"https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.0.5.tgz", - "integrity": "sha512-Gw4f9pwdH+w5w+49k0gLjN0PfRDHvxmAgG16AbyJZ7zhwZ6PBHKtWohvnSwfusfnK3L68dpBREHpVkj8wEM7ZA==", + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/@radix-ui/react-menu/-/react-menu-2.0.6.tgz", + "integrity": "sha512-BVkFLS+bUC8HcImkRKPSiVumA1VPOOEC5WBMiT+QAVsPzW1FJzI9KnqgGxVDPBcql5xXrHkD3JOVoXWEXD8SYA==", "dependencies": { "@babel/runtime": "^7.13.10", "@radix-ui/primitive": "1.0.1", @@ -1051,12 +1050,12 @@ "@radix-ui/react-compose-refs": "1.0.1", "@radix-ui/react-context": "1.0.1", "@radix-ui/react-direction": "1.0.1", - "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-dismissable-layer": "1.0.5", "@radix-ui/react-focus-guards": "1.0.1", - "@radix-ui/react-focus-scope": "1.0.3", + "@radix-ui/react-focus-scope": "1.0.4", "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-popper": "1.1.2", - "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-popper": "1.1.3", + "@radix-ui/react-portal": "1.0.4", "@radix-ui/react-presence": "1.0.1", "@radix-ui/react-primitive": "1.0.3", "@radix-ui/react-roving-focus": "1.0.4", @@ -1081,9 +1080,9 @@ } }, "node_modules/@radix-ui/react-popper": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.1.2.tgz", - "integrity": "sha512-1CnGGfFi/bbqtJZZ0P/NQY20xdG3E0LALJaLUEoKwPLwl6PPPfbeiCqMVQnhoFRAxjJj4RpBRJzDmUgsex2tSg==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-popper/-/react-popper-1.1.3.tgz", + "integrity": "sha512-cKpopj/5RHZWjrbF2846jBNacjQVwkP068DfmgrNJXpvVWrOvlAmE9xSiy5OqeE+Gi8D9fP+oDhUnPqNMY8/5w==", "dependencies": { "@babel/runtime": "^7.13.10", "@floating-ui/react-dom": "^2.0.0", @@ -1113,9 +1112,9 @@ } }, "node_modules/@radix-ui/react-portal": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.3.tgz", - "integrity": "sha512-xLYZeHrWoPmA5mEKEfZZevoVRK/Q43GfzRXkWV6qawIWWK8t6ifIiLQdd7rmQ4Vk1bmI21XhqF9BN3jWf+phpA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@radix-ui/react-portal/-/react-portal-1.0.4.tgz", + "integrity": "sha512-Qki+C/EuGUVCQTOTD5vzJzJuMUlewbzuKyUy+/iHM2uwGiru9gZeBJtHAPKAEkB5KWGi9mP/CHKcY0wt1aW45Q==", "dependencies": { "@babel/runtime": "^7.13.10", "@radix-ui/react-primitive": "1.0.3" @@ -1232,18 +1231,18 @@ } }, "node_modules/@radix-ui/react-tooltip": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.0.6.tgz", - "integrity": "sha512-DmNFOiwEc2UDigsYj6clJENma58OelxD24O4IODoZ+3sQc3Zb+L8w1EP+y9laTuKCLAysPw4fD6/v0j4KNV8rg==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/@radix-ui/react-tooltip/-/react-tooltip-1.0.7.tgz", + "integrity": "sha512-lPh5iKNFVQ/jav/j6ZrWq3blfDJ0OH9R6FlNUHPMqdLuQ9vwDgFsRxvl8b7Asuy5c8xmoojHUxKHQSOAvMHxyw==", "dependencies": { "@babel/runtime": "^7.13.10", "@radix-ui/primitive": "1.0.1", "@radix-ui/react-compose-refs": "1.0.1", "@radix-ui/react-context": "1.0.1", - "@radix-ui/react-dismissable-layer": "1.0.4", + "@radix-ui/react-dismissable-layer": "1.0.5", "@radix-ui/react-id": "1.0.1", - "@radix-ui/react-popper": "1.1.2", - "@radix-ui/react-portal": "1.0.3", + "@radix-ui/react-popper": "1.1.3", + "@radix-ui/react-portal": "1.0.4", "@radix-ui/react-presence": "1.0.1", "@radix-ui/react-primitive": "1.0.3", "@radix-ui/react-slot": "1.0.2", @@ -1402,315 +1401,975 @@ "@babel/runtime": "^7.13.10" } }, - "node_modules/@swc/core": { - "version": "1.3.62", - 
"resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.62.tgz", - "integrity": "sha512-J58hWY+/G8vOr4J6ZH9hLg0lMSijZtqIIf4HofZezGog/pVX6sJyBJ40dZ1ploFkDIlWTWvJyqtpesBKS73gkQ==", - "dev": true, - "hasInstallScript": true, - "engines": { - "node": ">=10" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/swc" - }, - "optionalDependencies": { - "@swc/core-darwin-arm64": "1.3.62", - "@swc/core-darwin-x64": "1.3.62", - "@swc/core-linux-arm-gnueabihf": "1.3.62", - "@swc/core-linux-arm64-gnu": "1.3.62", - "@swc/core-linux-arm64-musl": "1.3.62", - "@swc/core-linux-x64-gnu": "1.3.62", - "@swc/core-linux-x64-musl": "1.3.62", - "@swc/core-win32-arm64-msvc": "1.3.62", - "@swc/core-win32-ia32-msvc": "1.3.62", - "@swc/core-win32-x64-msvc": "1.3.62" - }, - "peerDependencies": { - "@swc/helpers": "^0.5.0" - }, - "peerDependenciesMeta": { - "@swc/helpers": { - "optional": true - } + "node_modules/@swagger-api/apidom-ast": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ast/-/apidom-ast-0.83.0.tgz", + "integrity": "sha512-zAn9kHFi2JmEldYxzw6x7rbKxL4NVWvOeCWQL0AlwcWHPRhW+16/1VeHNhoWeiWm6QMERNT8z0o5frg+2czb6g==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-error": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2", + "unraw": "^3.0.0" } }, - "node_modules/@swc/core-darwin-arm64": { - "version": "1.3.62", - "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.62.tgz", - "integrity": "sha512-MmGilibITz68LEje6vJlKzc2gUUSgzvB3wGLSjEORikTNeM7P8jXVxE4A8fgZqDeudJUm9HVWrxCV+pHDSwXhA==", - "cpu": [ - "arm64" - ], - "dev": true, + "node_modules/@swagger-api/apidom-core": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-core/-/apidom-core-0.83.0.tgz", + "integrity": "sha512-4pWzSbxfYrS5rH7tl4WLO5nyR7pF+aAIymwsyV2Xrec44p6d4UZaJEn1iI3r9PBBdlmOHPKgr3QiOxn71Q3XUA==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.83.0", + "@swagger-api/apidom-error": "^0.83.0", + "@types/ramda": "~0.29.6", + "minim": "~0.23.8", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "short-unique-id": "^5.0.2", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-error": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-error/-/apidom-error-0.83.0.tgz", + "integrity": "sha512-0T3B+5Q2cApW0EkcMAqpgvsj+ab46HPvkVsYClA9/L0suRvyPiI5XDkHsw26qPGsmuB5nCH4hveZHlbWwRINMg==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7" + } + }, + "node_modules/@swagger-api/apidom-json-pointer": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-json-pointer/-/apidom-json-pointer-0.83.0.tgz", + "integrity": "sha512-mT60Dfqfym9LisGcFEUV/ZwCWrcd/sI24ACAUr7D/gCMX2GuJHC7qrRwWVjGDaaDMVhDM5eCi6GKPjQhs0Ckmw==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-error": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } + }, + "node_modules/@swagger-api/apidom-ns-api-design-systems": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-api-design-systems/-/apidom-ns-api-design-systems-0.83.0.tgz", + "integrity": "sha512-ahkhB8QIQhos0g2WRAPb7d3HRPP4FgaPTq81Fd3IeCy1pqsRrMhBOHBt3aksOmSvCrHScXHiIU0OBsGA+vt1CA==", "optional": true, - "os": [ - 
"darwin" - ], - "engines": { - "node": ">=10" + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-error": "^0.83.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" } }, - "node_modules/@tanstack/query-core": { - "version": "4.35.3", - "resolved": "https://registry.npmjs.org/@tanstack/query-core/-/query-core-4.35.3.tgz", - "integrity": "sha512-PS+WEjd9wzKTyNjjQymvcOe1yg8f3wYc6mD+vb6CKyZAKvu4sIJwryfqfBULITKCla7P9C4l5e9RXePHvZOZeQ==", - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" + "node_modules/@swagger-api/apidom-ns-asyncapi-2": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-asyncapi-2/-/apidom-ns-asyncapi-2-0.83.0.tgz", + "integrity": "sha512-A53C93GXcB9D7XSZRzEHv2k+GSa7nl7agN364sFFxS4Q/CtwNQiKVkpMCc5nG7/jUJOgj9BgevBR2p5kgYzH8Q==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-json-schema-draft-7": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-json-schema-draft-4": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-4/-/apidom-ns-json-schema-draft-4-0.83.0.tgz", + "integrity": "sha512-boknhIfrXF1k9IxLV0CkO1EoeXed4mzDNbFNKTkIv7UAdFwAa7NiQLVlEehNY3Ufm3/PjVMzYVQ80tUbyQE2Sw==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.83.0", + "@swagger-api/apidom-core": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-json-schema-draft-6": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-6/-/apidom-ns-json-schema-draft-6-0.83.0.tgz", + "integrity": "sha512-QP5MJh8hB5eK1+lZlZvUk7H02Oa+Qaq+BPNpAbmV4oG8YLUg98NxyKt+BFVhtfHWa1/i/Cpr3muiNdVIClduxw==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-error": "^0.83.0", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" } }, - "node_modules/@tanstack/react-query": { - "version": "4.35.3", - "resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-4.35.3.tgz", - "integrity": "sha512-UgTPioip/rGG3EQilXfA2j4BJkhEQsR+KAbF+KIuvQ7j4MkgnTCJF01SfRpIRNtQTlEfz/+IL7+jP8WA8bFbsw==", + "node_modules/@swagger-api/apidom-ns-json-schema-draft-7": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-json-schema-draft-7/-/apidom-ns-json-schema-draft-7-0.83.0.tgz", + "integrity": "sha512-+91iNJQ1Oe7Hx7Q306O2JUyp7I1s0FvoZ/8FxiVYtcohGQW21CQ0j8kLv4NrQjHuHRgOquPPUXOEJGcX7s8Zsw==", + "optional": true, "dependencies": { - "@tanstack/query-core": "4.35.3", - "use-sync-external-store": "^1.2.0" - }, - "funding": { - "type": "github", - "url": "https://github.com/sponsors/tannerlinsley" - }, - "peerDependencies": { - "react": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react-dom": "^16.8.0 || ^17.0.0 || ^18.0.0", - "react-native": "*" - }, - "peerDependenciesMeta": { - "react-dom": { - "optional": true - }, 
- "react-native": { - "optional": true - } + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-error": "^0.83.0", + "@swagger-api/apidom-ns-json-schema-draft-6": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" } }, - "node_modules/@types/codemirror": { - "version": "5.60.8", - "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-5.60.8.tgz", - "integrity": "sha512-VjFgDF/eB+Aklcy15TtOTLQeMjTo07k7KAjql8OK5Dirr7a6sJY4T1uVBDuTVG9VEmn1uUsohOpYnVfgC6/jyw==", + "node_modules/@swagger-api/apidom-ns-openapi-2": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-2/-/apidom-ns-openapi-2-0.83.0.tgz", + "integrity": "sha512-05/IsGs1dJffvbyaxCXGA5r+tVMJpL+LOwqiKl7hGqUWOC4ku2sA0fLhxiu7fhedxq/Kbqi7ahQMihQhEP0cDQ==", + "optional": true, "dependencies": { - "@types/tern": "*" + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-error": "^0.83.0", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-openapi-3-0": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-0/-/apidom-ns-openapi-3-0-0.83.0.tgz", + "integrity": "sha512-OAN6buySWrWSvnctKVSxkG5HyUOVc8F87zHy8mxcKn91AaHPC6h8LBxIXcmXFDfZNvORZYTi7GFw3W+mnIMTwg==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-error": "^0.83.0", + "@swagger-api/apidom-ns-json-schema-draft-4": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-ns-openapi-3-1": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-ns-openapi-3-1/-/apidom-ns-openapi-3-1-0.83.0.tgz", + "integrity": "sha512-xD/T5f9Phqk4/FN5iaH8OM+5AbUqXQV92zdN5twrLCgCCA3l/1PMA7g9qEBTCG3f6UmyJ/6TTFOJyz7utye7Hg==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.83.0", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" + } + }, + "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-json": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-json/-/apidom-parser-adapter-api-design-systems-json-0.83.0.tgz", + "integrity": "sha512-GeMW5pamup8KeaYSbyV2/zMilslIPhQLMf9h9le9JJGJ233ugiBf/y5Vguyj1w1TQXniXztXF43B3A+RNArkmg==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-api-design-systems": "^0.83.0", + "@swagger-api/apidom-parser-adapter-json": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" } }, - "node_modules/@types/estree": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.1.tgz", - "integrity": "sha512-LG4opVs2ANWZ1TJoKc937iMmNstM/d0ae1vNbnBvBhqCSezgVUOzcLCqbI5elV8Vy6WKwKjaqR+zO9VKirBBCA==" + "node_modules/@swagger-api/apidom-parser-adapter-api-design-systems-yaml": { + "version": "0.83.0", + "resolved": 
"https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-api-design-systems-yaml/-/apidom-parser-adapter-api-design-systems-yaml-0.83.0.tgz", + "integrity": "sha512-KYpW/gVfz4SQ4YPmC3x9wnUcOlwah7D4r/S2+FLvEQhf6LoEmKHL1ljcZ1Ma3seWCqMhmS1sKXHWNcYyNtY49A==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-api-design-systems": "^0.83.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } }, - "node_modules/@types/json-schema": { - "version": "7.0.12", - "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.12.tgz", - "integrity": "sha512-Hr5Jfhc9eYOQNPYO5WLDq/n4jqijdHNlDXjuAQkkt+mWdQR+XJToOHrsD4cPaMXpn6KO7y2+wM8AZEs8VpBLVA==", - "dev": true + "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-json-2": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-json-2/-/apidom-parser-adapter-asyncapi-json-2-0.83.0.tgz", + "integrity": "sha512-iQPDH6uIGRvJTQt6olkVUwndT91fVNrlBH3LybwHbFVLs1CKcQGJQ4lLENGw97YBVp83VO78P20Av5CiGEu80Q==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-asyncapi-2": "^0.83.0", + "@swagger-api/apidom-parser-adapter-json": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } }, - "node_modules/@types/prop-types": { - "version": "15.7.5", - "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", - "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==", - "devOptional": true + "node_modules/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-asyncapi-yaml-2/-/apidom-parser-adapter-asyncapi-yaml-2-0.83.0.tgz", + "integrity": "sha512-Q5UuatTIpYTzdCZH6ZcbT9Pw0MCLzaYzrFM6hdBWusbUriuwT12nTyt3Wer7/6nOcg+ysPTX7lUpxfUMPwT6xA==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-asyncapi-2": "^0.83.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } }, - "node_modules/@types/react": { - "version": "18.2.21", - "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.21.tgz", - "integrity": "sha512-neFKG/sBAwGxHgXiIxnbm3/AAVQ/cMRS93hvBpg8xYRbeQSPVABp9U2bRnPf0iI4+Ucdv3plSxKK+3CW2ENJxA==", - "devOptional": true, + "node_modules/@swagger-api/apidom-parser-adapter-json": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-json/-/apidom-parser-adapter-json-0.83.0.tgz", + "integrity": "sha512-V6KDWP4JuLYaTpd9J8n76kiFP09trJ6PmeVERioPoZn0HpaNh7eFcIFkejFGamQADYPrF6aW6b3A2MmJjTqbMg==", + "optional": true, "dependencies": { - "@types/prop-types": "*", - "@types/scheduler": "*", - "csstype": "^3.0.2" + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.83.0", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-error": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2", + "tree-sitter": "=0.20.4", + "tree-sitter-json": "=0.20.1", + "web-tree-sitter": "=0.20.3" + } + 
}, + "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-2": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-2/-/apidom-parser-adapter-openapi-json-2-0.83.0.tgz", + "integrity": "sha512-bNrD+hpmQINU+hhzgc5VEFp04UJXRf4tKq4XpPrtVBOvZ4uJwmqLVVVNfZqes8OfLt/7ijgxNju6IwruvLeylQ==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-openapi-2": "^0.83.0", + "@swagger-api/apidom-parser-adapter-json": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" } }, - "node_modules/@types/react-dom": { - "version": "18.2.7", - "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.7.tgz", - "integrity": "sha512-GRaAEriuT4zp9N4p1i8BDBYmEyfo+xQ3yHjJU4eiK5NDa1RmUZG+unZABUTK4/Ox/M+GaHwb6Ow8rUITrtjszA==", - "devOptional": true, + "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-0": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-0/-/apidom-parser-adapter-openapi-json-3-0-0.83.0.tgz", + "integrity": "sha512-UbtCsg+OBbWE1vYXPeNHeLSj+79YHhDtNNPai5NFTcXgPlNhuEOKBeCqq+VBA7sos3amk0lHYUz/UFCDIcR29w==", + "optional": true, "dependencies": { - "@types/react": "*" + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.83.0", + "@swagger-api/apidom-parser-adapter-json": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" } }, - "node_modules/@types/scheduler": { - "version": "0.16.3", - "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", - "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==", - "devOptional": true + "node_modules/@swagger-api/apidom-parser-adapter-openapi-json-3-1": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-json-3-1/-/apidom-parser-adapter-openapi-json-3-1-0.83.0.tgz", + "integrity": "sha512-+O2m00jNtESw1y+KCubcte61S1SN9Nxda/KaA6yXLsZgjiYAs0HXcPEyjwGbhjHtm6NfexbOdT0poHOYbsvWfQ==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.83.0", + "@swagger-api/apidom-parser-adapter-json": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } }, - "node_modules/@types/semver": { - "version": "7.5.2", - "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.2.tgz", - "integrity": "sha512-7aqorHYgdNO4DM36stTiGO3DvKoex9TQRwsJU6vMaFGyqpBA1MNZkz+PG3gaNUPpTAOYhT1WR7M1JyA3fbS9Cw==", - "dev": true + "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-2": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-2/-/apidom-parser-adapter-openapi-yaml-2-0.83.0.tgz", + "integrity": "sha512-YtU1wSE57yucov8A179TSB5WMJ4X5pxF5ccxW8yNxwVPH3tYkVgh5mPI8zVXQsjWLCSpyhZbiLWT5reYl5Onqw==", + "optional": true, + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-openapi-2": "^0.83.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" + } }, - "node_modules/@types/tern": { - "version": 
"0.23.4", - "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.4.tgz", - "integrity": "sha512-JAUw1iXGO1qaWwEOzxTKJZ/5JxVeON9kvGZ/osgZaJImBnyjyn0cjovPsf6FNLmyGY8Vw9DoXZCMlfMkMwHRWg==", + "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-0/-/apidom-parser-adapter-openapi-yaml-3-0-0.83.0.tgz", + "integrity": "sha512-3he5fFM3GS6/WtcVldvWQgW2TFO7S2rWqYMHGASdLLm8E9pzfRw2T30ZymkDuMlC4rqH9zscbJnRFMXQV9OylQ==", + "optional": true, "dependencies": { - "@types/estree": "*" + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.83.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" } }, - "node_modules/@typescript-eslint/eslint-plugin": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.7.0.tgz", - "integrity": "sha512-gUqtknHm0TDs1LhY12K2NA3Rmlmp88jK9Tx8vGZMfHeNMLE3GH2e9TRub+y+SOjuYgtOmok+wt1AyDPZqxbNag==", - "dev": true, + "node_modules/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-openapi-yaml-3-1/-/apidom-parser-adapter-openapi-yaml-3-1-0.83.0.tgz", + "integrity": "sha512-m8SAWw8fD0QH3SR70NiDzFsJnQjzEREY5v8O8brqs5c/Rz/JtJ2WCDrLHK7eVq/Myapl/ZRJx+/xJbPZckzE0g==", + "optional": true, "dependencies": { - "@eslint-community/regexpp": "^4.5.1", - "@typescript-eslint/scope-manager": "6.7.0", - "@typescript-eslint/type-utils": "6.7.0", - "@typescript-eslint/utils": "6.7.0", - "@typescript-eslint/visitor-keys": "6.7.0", - "debug": "^4.3.4", - "graphemer": "^1.4.0", - "ignore": "^5.2.4", - "natural-compare": "^1.4.0", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" - }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.83.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.0.0" } }, - "node_modules/@typescript-eslint/parser": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.7.0.tgz", - "integrity": "sha512-jZKYwqNpNm5kzPVP5z1JXAuxjtl2uG+5NpaMocFPTNC2EdYIgbXIPImObOkhbONxtFTTdoZstLZefbaK+wXZng==", - "dev": true, + "node_modules/@swagger-api/apidom-parser-adapter-yaml-1-2": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-parser-adapter-yaml-1-2/-/apidom-parser-adapter-yaml-1-2-0.83.0.tgz", + "integrity": "sha512-3Pgtz88rxaiW2qg1RC8BUhusHAXe/a+FDNscfa9GHzHMEVZSmeZ13tfhzOW6a4TINmWyO7DNcKtdvlVQAPlmXQ==", + "optional": true, "dependencies": { - "@typescript-eslint/scope-manager": "6.7.0", - "@typescript-eslint/types": "6.7.0", - "@typescript-eslint/typescript-estree": "6.7.0", - "@typescript-eslint/visitor-keys": "6.7.0", - "debug": "^4.3.4" + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-ast": "^0.83.0", + "@swagger-api/apidom-core": 
"^0.83.0", + "@swagger-api/apidom-error": "^0.83.0", + "@types/ramda": "~0.29.6", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2", + "tree-sitter": "=0.20.4", + "tree-sitter-yaml": "=0.5.0", + "web-tree-sitter": "=0.20.3" + } + }, + "node_modules/@swagger-api/apidom-reference": { + "version": "0.83.0", + "resolved": "https://registry.npmjs.org/@swagger-api/apidom-reference/-/apidom-reference-0.83.0.tgz", + "integrity": "sha512-f7Pm3fQwjf1pqniV+9abkC+oYUAbL/31GCg58r8ou4Cx+5hGTpUg81caMjdeg5Y4+Txj2ZUaAaUYyigEV25i4w==", + "dependencies": { + "@babel/runtime-corejs3": "^7.20.7", + "@swagger-api/apidom-core": "^0.83.0", + "@types/ramda": "~0.29.6", + "axios": "^1.4.0", + "minimatch": "^7.4.3", + "process": "^0.11.10", + "ramda": "~0.29.0", + "ramda-adjunct": "^4.1.1", + "stampit": "^4.3.2" }, - "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "optionalDependencies": { + "@swagger-api/apidom-error": "^0.83.0", + "@swagger-api/apidom-json-pointer": "^0.83.0", + "@swagger-api/apidom-ns-asyncapi-2": "^0.83.0", + "@swagger-api/apidom-ns-openapi-2": "^0.83.0", + "@swagger-api/apidom-ns-openapi-3-0": "^0.83.0", + "@swagger-api/apidom-ns-openapi-3-1": "^0.83.0", + "@swagger-api/apidom-parser-adapter-api-design-systems-json": "^0.83.0", + "@swagger-api/apidom-parser-adapter-api-design-systems-yaml": "^0.83.0", + "@swagger-api/apidom-parser-adapter-asyncapi-json-2": "^0.83.0", + "@swagger-api/apidom-parser-adapter-asyncapi-yaml-2": "^0.83.0", + "@swagger-api/apidom-parser-adapter-json": "^0.83.0", + "@swagger-api/apidom-parser-adapter-openapi-json-2": "^0.83.0", + "@swagger-api/apidom-parser-adapter-openapi-json-3-0": "^0.83.0", + "@swagger-api/apidom-parser-adapter-openapi-json-3-1": "^0.83.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-2": "^0.83.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-0": "^0.83.0", + "@swagger-api/apidom-parser-adapter-openapi-yaml-3-1": "^0.83.0", + "@swagger-api/apidom-parser-adapter-yaml-1-2": "^0.83.0" + } + }, + "node_modules/@swagger-api/apidom-reference/node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dependencies": { + "balanced-match": "^1.0.0" } }, - "node_modules/@typescript-eslint/scope-manager": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.7.0.tgz", - "integrity": "sha512-lAT1Uau20lQyjoLUQ5FUMSX/dS07qux9rYd5FGzKz/Kf8W8ccuvMyldb8hadHdK/qOI7aikvQWqulnEq2nCEYA==", - "dev": true, + "node_modules/@swagger-api/apidom-reference/node_modules/minimatch": { + "version": "7.4.6", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-7.4.6.tgz", + "integrity": "sha512-sBz8G/YjVniEz6lKPNpKxXwazJe4c19fEfV2GDMX6AjFz+MX9uDWIZW8XreVhkFW3fkIdTv/gxWr/Kks5FFAVw==", "dependencies": { - "@typescript-eslint/types": "6.7.0", - "@typescript-eslint/visitor-keys": "6.7.0" + "brace-expansion": "^2.0.1" }, "engines": { - "node": "^16.0.0 || >=18.0.0" + "node": ">=10" }, "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" + "url": "https://github.com/sponsors/isaacs" } }, - 
"node_modules/@typescript-eslint/type-utils": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.7.0.tgz", - "integrity": "sha512-f/QabJgDAlpSz3qduCyQT0Fw7hHpmhOzY/Rv6zO3yO+HVIdPfIWhrQoAyG+uZVtWAIS85zAyzgAFfyEr+MgBpg==", + "node_modules/@swc/core": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core/-/core-1.3.96.tgz", + "integrity": "sha512-zwE3TLgoZwJfQygdv2SdCK9mRLYluwDOM53I+dT6Z5ZvrgVENmY3txvWDvduzkV+/8IuvrRbVezMpxcojadRdQ==", "dev": true, + "hasInstallScript": true, "dependencies": { - "@typescript-eslint/typescript-estree": "6.7.0", - "@typescript-eslint/utils": "6.7.0", - "debug": "^4.3.4", - "ts-api-utils": "^1.0.1" + "@swc/counter": "^0.1.1", + "@swc/types": "^0.1.5" }, "engines": { - "node": "^16.0.0 || >=18.0.0" + "node": ">=10" }, "funding": { "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" + "url": "https://opencollective.com/swc" + }, + "optionalDependencies": { + "@swc/core-darwin-arm64": "1.3.96", + "@swc/core-darwin-x64": "1.3.96", + "@swc/core-linux-arm-gnueabihf": "1.3.96", + "@swc/core-linux-arm64-gnu": "1.3.96", + "@swc/core-linux-arm64-musl": "1.3.96", + "@swc/core-linux-x64-gnu": "1.3.96", + "@swc/core-linux-x64-musl": "1.3.96", + "@swc/core-win32-arm64-msvc": "1.3.96", + "@swc/core-win32-ia32-msvc": "1.3.96", + "@swc/core-win32-x64-msvc": "1.3.96" }, "peerDependencies": { - "eslint": "^7.0.0 || ^8.0.0" + "@swc/helpers": "^0.5.0" }, "peerDependenciesMeta": { - "typescript": { + "@swc/helpers": { "optional": true } } }, - "node_modules/@typescript-eslint/types": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.7.0.tgz", - "integrity": "sha512-ihPfvOp7pOcN/ysoj0RpBPOx3HQTJTrIN8UZK+WFd3/iDeFHHqeyYxa4hQk4rMhsz9H9mXpR61IzwlBVGXtl9Q==", + "node_modules/@swc/core-darwin-arm64": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-arm64/-/core-darwin-arm64-1.3.96.tgz", + "integrity": "sha512-8hzgXYVd85hfPh6mJ9yrG26rhgzCmcLO0h1TIl8U31hwmTbfZLzRitFQ/kqMJNbIBCwmNH1RU2QcJnL3d7f69A==", + "cpu": [ + "arm64" + ], "dev": true, + "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" + "node": ">=10" } }, - "node_modules/@typescript-eslint/typescript-estree": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.7.0.tgz", - "integrity": "sha512-dPvkXj3n6e9yd/0LfojNU8VMUGHWiLuBZvbM6V6QYD+2qxqInE7J+J/ieY2iGwR9ivf/R/haWGkIj04WVUeiSQ==", + "node_modules/@swc/core-darwin-x64": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-darwin-x64/-/core-darwin-x64-1.3.96.tgz", + "integrity": "sha512-mFp9GFfuPg+43vlAdQZl0WZpZSE8sEzqL7sr/7Reul5McUHP0BaLsEzwjvD035ESfkY8GBZdLpMinblIbFNljQ==", + "cpu": [ + "x64" + ], "dev": true, - "dependencies": { - "@typescript-eslint/types": "6.7.0", - "@typescript-eslint/visitor-keys": "6.7.0", - "debug": "^4.3.4", - "globby": "^11.1.0", - "is-glob": "^4.0.3", - "semver": "^7.5.4", - "ts-api-utils": "^1.0.1" - }, + "optional": true, + "os": [ + "darwin" + ], "engines": { - "node": "^16.0.0 || >=18.0.0" - }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/typescript-eslint" - }, - "peerDependenciesMeta": { - "typescript": { - "optional": true - } + "node": ">=10" + } + }, + 
"node_modules/@swc/core-linux-arm-gnueabihf": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm-gnueabihf/-/core-linux-arm-gnueabihf-1.3.96.tgz", + "integrity": "sha512-8UEKkYJP4c8YzYIY/LlbSo8z5Obj4hqcv/fUTHiEePiGsOddgGf7AWjh56u7IoN/0uEmEro59nc1ChFXqXSGyg==", + "cpu": [ + "arm" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-gnu": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-gnu/-/core-linux-arm64-gnu-1.3.96.tgz", + "integrity": "sha512-c/IiJ0s1y3Ymm2BTpyC/xr6gOvoqAVETrivVXHq68xgNms95luSpbYQ28rqaZC8bQC8M5zdXpSc0T8DJu8RJGw==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-arm64-musl": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-linux-arm64-musl/-/core-linux-arm64-musl-1.3.96.tgz", + "integrity": "sha512-i5/UTUwmJLri7zhtF6SAo/4QDQJDH2fhYJaBIUhrICmIkRO/ltURmpejqxsM/ye9Jqv5zG7VszMC0v/GYn/7BQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-gnu": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-gnu/-/core-linux-x64-gnu-1.3.96.tgz", + "integrity": "sha512-USdaZu8lTIkm4Yf9cogct/j5eqtdZqTgcTib4I+NloUW0E/hySou3eSyp3V2UAA1qyuC72ld1otXuyKBna0YKQ==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-linux-x64-musl": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-linux-x64-musl/-/core-linux-x64-musl-1.3.96.tgz", + "integrity": "sha512-QYErutd+G2SNaCinUVobfL7jWWjGTI0QEoQ6hqTp7PxCJS/dmKmj3C5ZkvxRYcq7XcZt7ovrYCTwPTHzt6lZBg==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-arm64-msvc": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-win32-arm64-msvc/-/core-win32-arm64-msvc-1.3.96.tgz", + "integrity": "sha512-hjGvvAduA3Un2cZ9iNP4xvTXOO4jL3G9iakhFsgVhpkU73SGmK7+LN8ZVBEu4oq2SUcHO6caWvnZ881cxGuSpg==", + "cpu": [ + "arm64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-ia32-msvc": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-win32-ia32-msvc/-/core-win32-ia32-msvc-1.3.96.tgz", + "integrity": "sha512-Far2hVFiwr+7VPCM2GxSmbh3ikTpM3pDombE+d69hkedvYHYZxtTF+2LTKl/sXtpbUnsoq7yV/32c9R/xaaWfw==", + "cpu": [ + "ia32" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/core-win32-x64-msvc": { + "version": "1.3.96", + "resolved": "https://registry.npmjs.org/@swc/core-win32-x64-msvc/-/core-win32-x64-msvc-1.3.96.tgz", + "integrity": "sha512-4VbSAniIu0ikLf5mBX81FsljnfqjoVGleEkCQv4+zRlyZtO3FHoDPkeLVoy6WRlj7tyrRcfUJ4mDdPkbfTO14g==", + "cpu": [ + "x64" + ], + "dev": true, + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=10" + } + }, + "node_modules/@swc/counter": { + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/@swc/counter/-/counter-0.1.2.tgz", + "integrity": "sha512-9F4ys4C74eSTEUNndnER3VJ15oru2NumfQxS8geE+f3eB5xvfxpWyqE5XlVnxb/R14uoXi6SLbBwwiDSkv+XEw==", + 
"dev": true + }, + "node_modules/@swc/types": { + "version": "0.1.5", + "resolved": "https://registry.npmjs.org/@swc/types/-/types-0.1.5.tgz", + "integrity": "sha512-myfUej5naTBWnqOCc/MdVOLVjXUXtIA+NpDrDBKJtLLg2shUjBu3cZmB/85RyitKc55+lUUyl7oRfLOvkr2hsw==", + "dev": true + }, + "node_modules/@types/codemirror": { + "version": "5.60.12", + "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-5.60.12.tgz", + "integrity": "sha512-SFSj5Tb/mtQoVgaltsipdRGG1PkcFu/L0OXPNBGCXYUQtwsNoAGRNNHOTl1jYcQUcEI77EiUfk94bgETTbSo/A==", + "dependencies": { + "@types/tern": "*" + } + }, + "node_modules/@types/estree": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.4.tgz", + "integrity": "sha512-2JwWnHK9H+wUZNorf2Zr6ves96WHoWDJIftkcxPKsS7Djta6Zu519LarhRNljPXkpsZR2ZMwNCPeW7omW07BJw==" + }, + "node_modules/@types/hast": { + "version": "2.3.6", + "resolved": "https://registry.npmjs.org/@types/hast/-/hast-2.3.6.tgz", + "integrity": "sha512-47rJE80oqPmFdVDCD7IheXBrVdwuBgsYwoczFvKmwfo2Mzsnt+V9OONsYauFmICb6lQPpCuXYJWejBNs4pDJRg==", + "dependencies": { + "@types/unist": "^2" + } + }, + "node_modules/@types/hoist-non-react-statics": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@types/hoist-non-react-statics/-/hoist-non-react-statics-3.3.3.tgz", + "integrity": "sha512-Wny3a2UXn5FEA1l7gc6BbpoV5mD1XijZqgkp4TRgDCDL5r3B5ieOFGUX5h3n78Tr1MEG7BfvoM8qeztdvNU0fw==", + "dependencies": { + "@types/react": "*", + "hoist-non-react-statics": "^3.3.0" + } + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true + }, + "node_modules/@types/prop-types": { + "version": "15.7.5", + "resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.5.tgz", + "integrity": "sha512-JCB8C6SnDoQf0cNycqd/35A7MjcnK+ZTqE7judS6o7utxUCg6imJg3QK2qzHKszlTjcj2cn+NwMB2i96ubpj7w==" + }, + "node_modules/@types/ramda": { + "version": "0.29.8", + "resolved": "https://registry.npmjs.org/@types/ramda/-/ramda-0.29.8.tgz", + "integrity": "sha512-CmEF76RSSj4NkgFnuQ4ZK3xeq8wMnE9zQH7sr54Yy/a61WbE1qIzWYVfd7XupLbTJY9jCjgEPbv6fqMlsW8Mvw==", + "dependencies": { + "types-ramda": "^0.29.5" + } + }, + "node_modules/@types/react": { + "version": "18.2.37", + "resolved": "https://registry.npmjs.org/@types/react/-/react-18.2.37.tgz", + "integrity": "sha512-RGAYMi2bhRgEXT3f4B92WTohopH6bIXw05FuGlmJEnv/omEn190+QYEIYxIAuIBdKgboYYdVved2p1AxZVQnaw==", + "dependencies": { + "@types/prop-types": "*", + "@types/scheduler": "*", + "csstype": "^3.0.2" + } + }, + "node_modules/@types/react-dom": { + "version": "18.2.15", + "resolved": "https://registry.npmjs.org/@types/react-dom/-/react-dom-18.2.15.tgz", + "integrity": "sha512-HWMdW+7r7MR5+PZqJF6YFNSCtjz1T0dsvo/f1BV6HkV+6erD/nA7wd9NM00KVG83zf2nJ7uATPO9ttdIPvi3gg==", + "devOptional": true, + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/scheduler": { + "version": "0.16.3", + "resolved": "https://registry.npmjs.org/@types/scheduler/-/scheduler-0.16.3.tgz", + "integrity": "sha512-5cJ8CB4yAx7BH1oMvdU0Jh9lrEXyPkar6F9G/ERswkCuvP4KQZfZkSjcMbAICCpQTN4OuZn8tz0HiKv9TGZgrQ==" + }, + "node_modules/@types/semver": { + "version": "7.5.5", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.5.tgz", + "integrity": 
"sha512-+d+WYC1BxJ6yVOgUgzK8gWvp5qF8ssV5r4nsDcZWKRWcDQLQ619tvWAxJQYGgBrO1MnLJC7a5GtiYsAoQ47dJg==", + "dev": true + }, + "node_modules/@types/swagger-ui-react": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@types/swagger-ui-react/-/swagger-ui-react-4.18.1.tgz", + "integrity": "sha512-nYhNi+cyN78vve1/QY5PNKYzHYlDKETtXj+gQAhuoCRB+GxGT3MVJUj8WCdwYj4vF0s1j68qkLv/66DGe5ZlnA==", + "dev": true, + "dependencies": { + "@types/react": "*" + } + }, + "node_modules/@types/tern": { + "version": "0.23.6", + "resolved": "https://registry.npmjs.org/@types/tern/-/tern-0.23.6.tgz", + "integrity": "sha512-ntalN+F2msUwz7/OCCADN4FwxtIGqF4Hqwxd15yAn0VOUozj1VaIrH4Prh95N8y69K3bQpHFVGwTJDZC4oRtvA==", + "dependencies": { + "@types/estree": "*" + } + }, + "node_modules/@types/unist": { + "version": "2.0.8", + "resolved": "https://registry.npmjs.org/@types/unist/-/unist-2.0.8.tgz", + "integrity": "sha512-d0XxK3YTObnWVp6rZuev3c49+j4Lo8g4L1ZRm9z5L0xpoZycUPshHgczK5gsUMaZOstjVYYi09p5gYvUtfChYw==" + }, + "node_modules/@types/use-sync-external-store": { + "version": "0.0.3", + "resolved": "https://registry.npmjs.org/@types/use-sync-external-store/-/use-sync-external-store-0.0.3.tgz", + "integrity": "sha512-EwmlvuaxPNej9+T4v5AuBPJa2x2UOJVdjCtDHgcDqitUeOtjnJKJ+apYjVcAoBEMjKW1VVFGZLUb5+qqa09XFA==" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "6.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.10.0.tgz", + "integrity": "sha512-uoLj4g2OTL8rfUQVx2AFO1hp/zja1wABJq77P6IclQs6I/m9GLrm7jCdgzZkvWdDCQf1uEvoa8s8CupsgWQgVg==", + "dev": true, + "dependencies": { + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "6.10.0", + "@typescript-eslint/type-utils": "6.10.0", + "@typescript-eslint/utils": "6.10.0", + "@typescript-eslint/visitor-keys": "6.10.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.11.0.tgz", + "integrity": "sha512-+whEdjk+d5do5nxfxx73oanLL9ghKO3EwM9kBCkUtWMRwWuPaFv9ScuqlYfQ6pAD6ZiJhky7TZ2ZYhrMsfMxVQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/scope-manager": "6.11.0", + "@typescript-eslint/types": "6.11.0", + "@typescript-eslint/typescript-estree": "6.11.0", + "@typescript-eslint/visitor-keys": "6.11.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/scope-manager": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.11.0.tgz", + "integrity": "sha512-0A8KoVvIURG4uhxAdjSaxy8RdRE//HztaZdG8KiHLP8WOXSk0vlF7Pvogv+vlJA5Rnjj/wDcFENvDaHb+gKd1A==", + "dev": true, + "dependencies": { + 
"@typescript-eslint/types": "6.11.0", + "@typescript-eslint/visitor-keys": "6.11.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/types": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.11.0.tgz", + "integrity": "sha512-ZbEzuD4DwEJxwPqhv3QULlRj8KYTAnNsXxmfuUXFCxZmO6CF2gM/y+ugBSAQhrqaJL3M+oe4owdWunaHM6beqA==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/typescript-estree": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.11.0.tgz", + "integrity": "sha512-Aezzv1o2tWJwvZhedzvD5Yv7+Lpu1by/U1LZ5gLc4tCx8jUmuSCMioPFRjliN/6SJIvY6HpTtJIWubKuYYYesQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.11.0", + "@typescript-eslint/visitor-keys": "6.11.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser/node_modules/@typescript-eslint/visitor-keys": { + "version": "6.11.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.11.0.tgz", + "integrity": "sha512-+SUN/W7WjBr05uRxPggJPSzyB8zUpaYo2hByKasWbqr3PM8AXfZt8UHdNpBS1v9SA62qnSSMF3380SwDqqprgQ==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.11.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "6.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.10.0.tgz", + "integrity": "sha512-TN/plV7dzqqC2iPNf1KrxozDgZs53Gfgg5ZHyw8erd6jd5Ta/JIEcdCheXFt9b1NYb93a1wmIIVW/2gLkombDg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.10.0", + "@typescript-eslint/visitor-keys": "6.10.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "6.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.10.0.tgz", + "integrity": "sha512-wYpPs3hgTFblMYwbYWPT3eZtaDOjbLyIYuqpwuLBBqhLiuvJ+9sEp2gNRJEtR5N/c9G1uTtQQL5AhV0fEPJYcg==", + "dev": true, + "dependencies": { + "@typescript-eslint/typescript-estree": "6.10.0", + "@typescript-eslint/utils": "6.10.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + 
"version": "6.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.10.0.tgz", + "integrity": "sha512-36Fq1PWh9dusgo3vH7qmQAj5/AZqARky1Wi6WpINxB6SkQdY5vQoT2/7rW7uBIsPDcvvGCLi4r10p0OJ7ITAeg==", + "dev": true, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "6.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.10.0.tgz", + "integrity": "sha512-ek0Eyuy6P15LJVeghbWhSrBCj/vJpPXXR+EpaRZqou7achUWL8IdYnMSC5WHAeTWswYQuP2hAZgij/bC9fanBg==", + "dev": true, + "dependencies": { + "@typescript-eslint/types": "6.10.0", + "@typescript-eslint/visitor-keys": "6.10.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } } }, "node_modules/@typescript-eslint/utils": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.7.0.tgz", - "integrity": "sha512-MfCq3cM0vh2slSikQYqK2Gq52gvOhe57vD2RM3V4gQRZYX4rDPnKLu5p6cm89+LJiGlwEXU8hkYxhqqEC/V3qA==", + "version": "6.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.10.0.tgz", + "integrity": "sha512-v+pJ1/RcVyRc0o4wAGux9x42RHmAjIGzPRo538Z8M1tVx6HOnoQBCX/NoadHQlZeC+QO2yr4nNSFWOoraZCAyg==", "dev": true, "dependencies": { "@eslint-community/eslint-utils": "^4.4.0", "@types/json-schema": "^7.0.12", "@types/semver": "^7.5.0", - "@typescript-eslint/scope-manager": "6.7.0", - "@typescript-eslint/types": "6.7.0", - "@typescript-eslint/typescript-estree": "6.7.0", + "@typescript-eslint/scope-manager": "6.10.0", + "@typescript-eslint/types": "6.10.0", + "@typescript-eslint/typescript-estree": "6.10.0", "semver": "^7.5.4" }, "engines": { @@ -1725,12 +2384,12 @@ } }, "node_modules/@typescript-eslint/visitor-keys": { - "version": "6.7.0", - "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.7.0.tgz", - "integrity": "sha512-/C1RVgKFDmGMcVGeD8HjKv2bd72oI1KxQDeY8uc66gw9R0OK0eMq48cA+jv9/2Ag6cdrsUGySm1yzYmfz0hxwQ==", + "version": "6.10.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.10.0.tgz", + "integrity": "sha512-xMGluxQIEtOM7bqFCo+rCMh5fqI+ZxV5RUUOa29iVPz1OgCZrtc7rFnz5cLUazlkPKYqX+75iuDq7m0HQ48nCg==", "dev": true, "dependencies": { - "@typescript-eslint/types": "6.7.0", + "@typescript-eslint/types": "6.10.0", "eslint-visitor-keys": "^3.4.1" }, "engines": { @@ -1741,22 +2400,33 @@ "url": "https://opencollective.com/typescript-eslint" } }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true + }, "node_modules/@vitejs/plugin-react-swc": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.3.2.tgz", - "integrity": "sha512-VJFWY5sfoZerQRvJrh518h3AcQt6f/yTuWn4/TRB+dqmYU0NX1qz7qM5Wfd+gOQqUzQW4gxKqKN3KpE/P3+zrA==", + "version": "3.4.1", + "resolved": 
"https://registry.npmjs.org/@vitejs/plugin-react-swc/-/plugin-react-swc-3.4.1.tgz", + "integrity": "sha512-7YQOQcVV5x1luD8nkbCDdyYygFvn1hjqJk68UvNAzY2QG4o4N5EwAhLLFNOcd1HrdMwDl0VElP8VutoWf9IvJg==", "dev": true, "dependencies": { - "@swc/core": "^1.3.61" + "@swc/core": "^1.3.95" }, "peerDependencies": { "vite": "^4" } }, + "node_modules/@yarnpkg/lockfile": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz", + "integrity": "sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==" + }, "node_modules/acorn": { - "version": "8.10.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.10.0.tgz", - "integrity": "sha512-F0SAmZ8iUtS//m8DmCTA0jlh6TDKkHQyK6xc6V4KDTyZKA9dnvX9/3sRTVQrWm79glUAZbnmmNcdYwUIHWVybw==", + "version": "8.11.2", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.2.tgz", + "integrity": "sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==", "dev": true, "bin": { "acorn": "bin/acorn" @@ -1803,7 +2473,6 @@ "version": "4.3.0", "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", - "dev": true, "dependencies": { "color-convert": "^2.0.1" }, @@ -1830,11 +2499,6 @@ "node": ">=10" } }, - "node_modules/aria-hidden/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/array-union": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", @@ -1844,17 +2508,76 @@ "node": ">=8" } }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==" + }, + "node_modules/at-least-node": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/at-least-node/-/at-least-node-1.0.0.tgz", + "integrity": "sha512-+q/t7Ekv1EDY2l6Gda6LLiX14rU9TV20Wa3ofeQmwPFZbOMo9DXrLbOjFaaclkXKWidIaopwAObQDqwWtGUjqg==", + "engines": { + "node": ">= 4.0.0" + } + }, + "node_modules/autolinker": { + "version": "3.16.2", + "resolved": "https://registry.npmjs.org/autolinker/-/autolinker-3.16.2.tgz", + "integrity": "sha512-JiYl7j2Z19F9NdTmirENSUUIIL/9MytEWtmzhfmsKPCp9E+G35Y0UNCMoM9tFigxT59qSc8Ml2dlZXOCVTYwuA==", + "dependencies": { + "tslib": "^2.3.0" + } + }, + "node_modules/axios": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.6.1.tgz", + "integrity": "sha512-vfBmhDpKafglh0EldBEbVuoe7DyAavGSLWhuSm5ZSEKQnHhBf0xAAwybbNH1IkrJNGnS/VG4I5yxig1pCEXE4g==", + "dependencies": { + "follow-redirects": "^1.15.0", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, "node_modules/balanced-match": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==" + }, + "node_modules/base64-js": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", + "integrity": 
"sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, + "node_modules/bl": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", + "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", + "optional": true, + "dependencies": { + "buffer": "^5.5.0", + "inherits": "^2.0.4", + "readable-stream": "^3.4.0" + } }, "node_modules/brace-expansion": { "version": "1.1.11", "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, "dependencies": { "balanced-match": "^1.0.0", "concat-map": "0.0.1" @@ -1864,7 +2587,6 @@ "version": "3.0.2", "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.2.tgz", "integrity": "sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==", - "dev": true, "dependencies": { "fill-range": "^7.0.1" }, @@ -1872,6 +2594,43 @@ "node": ">=8" } }, + "node_modules/buffer": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", + "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "optional": true, + "dependencies": { + "base64-js": "^1.3.1", + "ieee754": "^1.1.13" + } + }, + "node_modules/call-bind": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.5.tgz", + "integrity": "sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ==", + "dependencies": { + "function-bind": "^1.1.2", + "get-intrinsic": "^1.2.1", + "set-function-length": "^1.1.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/callsites": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", @@ -1885,7 +2644,6 @@ "version": "4.1.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, "dependencies": { "ansi-styles": "^4.1.0", "supports-color": "^7.1.0" @@ -1897,6 +2655,58 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, + "node_modules/character-entities": { + "version": "1.2.4", + "resolved": "https://registry.npmjs.org/character-entities/-/character-entities-1.2.4.tgz", + "integrity": "sha512-iBMyeEHxfVnIakwOuDXpVkc54HijNgCyQB2w0VfGQThle6NXn50zU6V/u+LDhxHcDUPojn6Kpga3PTAD8W1bQw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-entities-legacy": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-entities-legacy/-/character-entities-legacy-1.1.4.tgz", + "integrity": "sha512-3Xnr+7ZFS1uxeiUDvV02wQ+QDbc55o97tIV5zHScSPJpcLm/r0DFPcoY3tYRp+VZukxuMeKgXYmsXQHO05zQeA==", + "funding": { + "type": "github", + "url": 
"https://github.com/sponsors/wooorm" + } + }, + "node_modules/character-reference-invalid": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/character-reference-invalid/-/character-reference-invalid-1.1.4.tgz", + "integrity": "sha512-mKKUkUbhPpQlCOfIuZkvSEgktjPFIsZKRRbC6KWVEMvlzblj3i3asQv5ODsrwt0N3pHAEvjP8KTQPHkp0+6jOg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/chownr": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", + "optional": true + }, + "node_modules/ci-info": { + "version": "3.9.0", + "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", + "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/sibiraj-s" + } + ], + "engines": { + "node": ">=8" + } + }, + "node_modules/classnames": { + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/classnames/-/classnames-2.3.2.tgz", + "integrity": "sha512-CSbhY4cFEJRe6/GQzIk5qXZ4Jeg5pcsP7b5peFSDpffpe1cqjASH/n9UTjBwOp6XpMSTwQ8Za2K5V02ueA7Tmw==" + }, "node_modules/client-only": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", @@ -1911,16 +2721,17 @@ } }, "node_modules/codemirror": { - "version": "5.65.14", - "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.65.14.tgz", - "integrity": "sha512-VSNugIBDGt0OU9gDjeVr6fNkoFQznrWEUdAApMlXQNbfE8gGO19776D6MwSqF/V/w/sDwonsQ0z7KmmI9guScg==" + "version": "5.65.15", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-5.65.15.tgz", + "integrity": "sha512-YC4EHbbwQeubZzxLl5G4nlbLc1T21QTrKGaOal/Pkm9dVDMZXMH7+ieSPEOZCtO9I68i8/oteJKOxzHC2zR+0g==" }, "node_modules/codemirror-graphql": { - "version": "2.0.9", - "resolved": "https://registry.npmjs.org/codemirror-graphql/-/codemirror-graphql-2.0.9.tgz", - "integrity": "sha512-gl1LR6XSBgZtl7Dr2q4jjRNfhxMF8vn+rnjZTZPf/l+VrQgavY8l3G//hW7s3hWy73iiqkq5LZ4KE1tdaxB/vQ==", + "version": "2.0.10", + "resolved": "https://registry.npmjs.org/codemirror-graphql/-/codemirror-graphql-2.0.10.tgz", + "integrity": "sha512-rC9NxibCsSzWtCQjHLfwKCkyYdGv2BT/BCgyDoKPrc/e7aGiyLyeT0fB60d+0imwlvhX3lIHncl6JMz2YxQ/jg==", "dependencies": { - "graphql-language-service": "5.1.7" + "@types/codemirror": "^0.0.90", + "graphql-language-service": "5.2.0" }, "peerDependencies": { "@codemirror/language": "6.0.0", @@ -1928,11 +2739,18 @@ "graphql": "^15.5.0 || ^16.0.0" } }, + "node_modules/codemirror-graphql/node_modules/@types/codemirror": { + "version": "0.0.90", + "resolved": "https://registry.npmjs.org/@types/codemirror/-/codemirror-0.0.90.tgz", + "integrity": "sha512-8Z9+tSg27NPRGubbUPUCrt5DDG/OWzLph5BvcDykwR5D7RyZh5mhHG0uS1ePKV1YFCA+/cwc4Ey2AJAEFfV3IA==", + "dependencies": { + "@types/tern": "*" + } + }, "node_modules/color-convert": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", - "dev": true, "dependencies": { "color-name": "~1.1.4" }, @@ -1943,14 +2761,40 @@ "node_modules/color-name": { "version": "1.1.4", "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", - "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", - "dev": true + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/comma-separated-tokens": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/comma-separated-tokens/-/comma-separated-tokens-1.0.8.tgz", + "integrity": "sha512-GHuDRO12Sypu2cV70d1dkA2EUmXHgntrzbpvOB+Qy+49ypNfGgFQIC2fhhXbnyrJRynDCAARsT7Ou0M6hirpfw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } }, "node_modules/concat-map": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==" + }, + "node_modules/cookie": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", + "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "engines": { + "node": ">= 0.6" + } }, "node_modules/copy-to-clipboard": { "version": "3.3.3", @@ -1960,11 +2804,20 @@ "toggle-selection": "^1.0.6" } }, + "node_modules/core-js-pure": { + "version": "3.33.2", + "resolved": "https://registry.npmjs.org/core-js-pure/-/core-js-pure-3.33.2.tgz", + "integrity": "sha512-a8zeCdyVk7uF2elKIGz67AjcXOxjRbwOLz8SbklEso1V+2DoW4OkAMZN9S9GBgvZIaqQi/OemFX4OiSoQEmg1Q==", + "hasInstallScript": true, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/core-js" + } + }, "node_modules/cross-spawn": { "version": "7.0.3", "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", - "dev": true, "dependencies": { "path-key": "^3.1.0", "shebang-command": "^2.0.0", @@ -1974,11 +2827,15 @@ "node": ">= 8" } }, + "node_modules/css.escape": { + "version": "1.5.1", + "resolved": "https://registry.npmjs.org/css.escape/-/css.escape-1.5.1.tgz", + "integrity": "sha512-YUifsXXuknHlUsmlgyY0PKzgPOr7/FjCePfHNt0jxm83wHZi44VDMQ7/fGNkjY3/jV1MC+1CmZbaHzugyeRtpg==" + }, "node_modules/csstype": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.2.tgz", - "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==", - "devOptional": true + "integrity": "sha512-I7K1Uu0MBPzaFKg4nI5Q7Vs2t+3gWWW648spaF+Rg7pI9ds18Ugn+lvg4SHczUdKlHI5LWBXyqfS8+DufyBsgQ==" }, "node_modules/debug": { "version": "4.3.4", @@ -1997,12 +2854,73 @@ } } }, + "node_modules/decompress-response": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", + "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", + "optional": true, + "dependencies": { + "mimic-response": "^3.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + 
"url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/deep-extend": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", + "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", + "engines": { + "node": ">=4.0.0" + } + }, "node_modules/deep-is": { "version": "0.1.4", "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", "dev": true }, + "node_modules/deepmerge": { + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.3.1.tgz", + "integrity": "sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/define-data-property": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.1.tgz", + "integrity": "sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ==", + "dependencies": { + "get-intrinsic": "^1.2.1", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/detect-libc": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.2.tgz", + "integrity": "sha512-UX6sGumvvqSaXgdKGUsgZWqcUyIXZ/vZTrlRT/iobiKhGL0zL4d3osHj3uqllWJK+i+sixDS/3COVEOFbupFyw==", + "optional": true, + "engines": { + "node": ">=8" + } + }, "node_modules/detect-node-es": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/detect-node-es/-/detect-node-es-1.1.0.tgz", @@ -2032,6 +2950,28 @@ "node": ">=6.0.0" } }, + "node_modules/dompurify": { + "version": "3.0.6", + "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.0.6.tgz", + "integrity": "sha512-ilkD8YEnnGh1zJ240uJsW7AzE+2qpbOUYjacomn3AvJ6J4JhKGSZ2nh4wUIXPZrEPppaCLx5jFe8T89Rk8tQ7w==" + }, + "node_modules/drange": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/drange/-/drange-1.1.1.tgz", + "integrity": "sha512-pYxfDYpued//QpnLIm4Avk7rsNtAtQkUES2cwAYSvD/wd2pKD71gN2Ebj3e7klzXwjocvE8c5vx/1fxwpqmSxA==", + "engines": { + "node": ">=4" + } + }, + "node_modules/end-of-stream": { + "version": "1.4.4", + "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", + "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", + "optional": true, + "dependencies": { + "once": "^1.4.0" + } + }, "node_modules/entities": { "version": "2.1.0", "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", @@ -2090,18 +3030,19 @@ } }, "node_modules/eslint": { - "version": "8.49.0", - "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.49.0.tgz", - "integrity": "sha512-jw03ENfm6VJI0jA9U+8H5zfl5b+FvuU3YYvZRdZHOlU2ggJkxrlkJH4HcDrZpj6YwD8kuYqvQM8LyesoazrSOQ==", + "version": "8.53.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.53.0.tgz", + "integrity": "sha512-N4VuiPjXDUa4xVeV/GC/RV3hQW9Nw+Y463lkWaKKXKYMvmRiRDAtfpuPFLN+E1/6ZhyR8J2ig+eVREnYgUsiag==", "dev": true, 
"dependencies": { "@eslint-community/eslint-utils": "^4.2.0", "@eslint-community/regexpp": "^4.6.1", - "@eslint/eslintrc": "^2.1.2", - "@eslint/js": "8.49.0", - "@humanwhocodes/config-array": "^0.11.11", + "@eslint/eslintrc": "^2.1.3", + "@eslint/js": "8.53.0", + "@humanwhocodes/config-array": "^0.11.13", "@humanwhocodes/module-importer": "^1.0.1", "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", "ajv": "^6.12.4", "chalk": "^4.0.0", "cross-spawn": "^7.0.2", @@ -2156,9 +3097,9 @@ } }, "node_modules/eslint-plugin-react-refresh": { - "version": "0.4.3", - "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.3.tgz", - "integrity": "sha512-Hh0wv8bUNY877+sI0BlCUlsS0TYYQqvzEwJsJJPM2WF4RnTStSnSR3zdJYa2nPOJgg3UghXi54lVyMSmpCalzA==", + "version": "0.4.4", + "resolved": "https://registry.npmjs.org/eslint-plugin-react-refresh/-/eslint-plugin-react-refresh-0.4.4.tgz", + "integrity": "sha512-eD83+65e8YPVg6603Om2iCIwcQJf/y7++MWm4tACtEswFLYMwxwVWAfwN+e19f5Ad/FOyyNg9Dfi5lXhH3Y3rA==", "dev": true, "peerDependencies": { "eslint": ">=7" @@ -2269,6 +3210,15 @@ "node": ">=0.10.0" } }, + "node_modules/expand-template": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", + "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", + "optional": true, + "engines": { + "node": ">=6" + } + }, "node_modules/fast-deep-equal": { "version": "3.1.3", "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", @@ -2329,6 +3279,18 @@ "reusify": "^1.0.4" } }, + "node_modules/fault": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/fault/-/fault-1.0.4.tgz", + "integrity": "sha512-CJ0HCB5tL5fYTEA7ToAq5+kTwd++Borf1/bifxd9iT70QcXr4MRrO3Llf8Ifs70q+SJcGHFtnIE/Nw6giCtECA==", + "dependencies": { + "format": "^0.2.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/file-entry-cache": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", @@ -2345,7 +3307,6 @@ "version": "7.0.1", "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.0.1.tgz", "integrity": "sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==", - "dev": true, "dependencies": { "to-regex-range": "^5.0.1" }, @@ -2369,6 +3330,14 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/find-yarn-workspace-root": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/find-yarn-workspace-root/-/find-yarn-workspace-root-2.0.0.tgz", + "integrity": "sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ==", + "dependencies": { + "micromatch": "^4.0.2" + } + }, "node_modules/flat-cache": { "version": "3.0.4", "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.0.4.tgz", @@ -2388,6 +3357,46 @@ "integrity": "sha512-5nqDSxl8nn5BSNxyR3n4I6eDmbolI6WT+QqR547RwxQapgjQBmtktdP+HTBb/a/zLsbzERTONyUB5pefh5TtjQ==", "dev": true }, + "node_modules/follow-redirects": { + "version": "1.15.3", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.3.tgz", + "integrity": "sha512-1VzOtuEM8pC9SFU1E+8KfTjZyMztRsgEfwQl44z8A25uy13jSzTj6dyK2Df52iV0vgHCfBwLhDWevLn95w5v6Q==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "engines": { + "node": 
">=4.0" + }, + "peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/format": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/format/-/format-0.2.2.tgz", + "integrity": "sha512-wzsgA6WOq+09wrU1tsJ09udeR/YZRaeArL9e1wPbFg3GG2yDnC2ldKpxs4xunpFF9DgqCqOIra3bc1HWrJ37Ww==", + "engines": { + "node": ">=0.4.x" + } + }, "node_modules/framer-motion": { "version": "6.5.1", "resolved": "https://registry.npmjs.org/framer-motion/-/framer-motion-6.5.1.tgz", @@ -2408,11 +3417,6 @@ "react-dom": ">=16.8 || ^17.0.0 || ^18.0.0" } }, - "node_modules/framer-motion/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/framesync": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/framesync/-/framesync-6.0.1.tgz", @@ -2421,16 +3425,30 @@ "tslib": "^2.1.0" } }, - "node_modules/framesync/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + "node_modules/fs-constants": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", + "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", + "optional": true + }, + "node_modules/fs-extra": { + "version": "9.1.0", + "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-9.1.0.tgz", + "integrity": "sha512-hcg3ZmepS30/7BSFqRvoo3DOMQu7IjqxO5nCDt+zM9XWjb33Wg7ziNT+Qvqbuc3+gWpzO02JubVyk2G4Zvo1OQ==", + "dependencies": { + "at-least-node": "^1.0.0", + "graceful-fs": "^4.2.0", + "jsonfile": "^6.0.1", + "universalify": "^2.0.0" + }, + "engines": { + "node": ">=10" + } }, "node_modules/fs.realpath": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==" }, "node_modules/fsevents": { "version": "2.3.2", @@ -2446,6 +3464,28 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, + "node_modules/function-bind": { + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", + "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/get-intrinsic": { + "version": "1.2.2", + "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.2.2.tgz", + "integrity": "sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA==", + "dependencies": { + "function-bind": "^1.1.2", + "has-proto": "^1.0.1", + "has-symbols": "^1.0.3", + "hasown": "^2.0.0" + }, + "funding": { + "url": 
"https://github.com/sponsors/ljharb" + } + }, "node_modules/get-nonce": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/get-nonce/-/get-nonce-1.0.1.tgz", @@ -2454,11 +3494,16 @@ "node": ">=6" } }, + "node_modules/github-from-package": { + "version": "0.0.0", + "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", + "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", + "optional": true + }, "node_modules/glob": { "version": "7.2.3", "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "dev": true, "dependencies": { "fs.realpath": "^1.0.0", "inflight": "^1.0.4", @@ -2487,9 +3532,9 @@ } }, "node_modules/globals": { - "version": "13.21.0", - "resolved": "https://registry.npmjs.org/globals/-/globals-13.21.0.tgz", - "integrity": "sha512-ybyme3s4yy/t/3s35bewwXKOf7cvzfreG2lH0lZl0JB7I4GxRP2ghxOK/Nb9EkRXdbBXZLfq/p/0W2JUONB/Gg==", + "version": "13.23.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.23.0.tgz", + "integrity": "sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==", "dev": true, "dependencies": { "type-fest": "^0.20.2" @@ -2521,6 +3566,22 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/gopd": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.0.1.tgz", + "integrity": "sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA==", + "dependencies": { + "get-intrinsic": "^1.1.3" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/graceful-fs": { + "version": "4.2.11", + "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", + "integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==" + }, "node_modules/graphemer": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", @@ -2528,13 +3589,13 @@ "dev": true }, "node_modules/graphiql": { - "version": "3.0.5", - "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.0.5.tgz", - "integrity": "sha512-R02CKVXPajOmJcg0TAKuRMU8qvwb7ltGDYqbaQMKbLeYYw/wQUrmTxLwdVuRadgRL4ubNzl3q5vKTkQKR5Ay2Q==", + "version": "3.0.9", + "resolved": "https://registry.npmjs.org/graphiql/-/graphiql-3.0.9.tgz", + "integrity": "sha512-xl9yEr6U4Wc3wmqvtP2sV2a3zGQkqrAMtU90x45QnpNT9MBgBn38HD1Yg5jExXxER65xmYWlGoYdAiD8v/dbEw==", "dependencies": { - "@graphiql/react": "^0.19.3", + "@graphiql/react": "^0.20.2", "@graphiql/toolkit": "^0.9.1", - "graphql-language-service": "^5.1.7", + "graphql-language-service": "^5.2.0", "markdown-it": "^12.2.0" }, "peerDependencies": { @@ -2544,17 +3605,17 @@ } }, "node_modules/graphql": { - "version": "16.8.0", - "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.0.tgz", - "integrity": "sha512-0oKGaR+y3qcS5mCu1vb7KG+a89vjn06C7Ihq/dDl3jA+A8B3TKomvi3CiEcVLJQGalbu8F52LxkOym7U5sSfbg==", + "version": "16.8.1", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.1.tgz", + "integrity": "sha512-59LZHPdGZVh695Ud9lRzPBVTtlX9ZCV150Er2W43ro37wVof0ctenSaskPPjN7lVTIN8mSZt8PHUNKZuNQUuxw==", "engines": { "node": "^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0" } }, "node_modules/graphql-language-service": { - "version": "5.1.7", - "resolved": 
"https://registry.npmjs.org/graphql-language-service/-/graphql-language-service-5.1.7.tgz", - "integrity": "sha512-xkawYMJeoNYGhT+SpSH3c2qf6HpGHQ/duDmrseVHBpVCrXAiGnliXGSCC4jyMGgZQ05GytsZ12p0nUo7s6lSSw==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/graphql-language-service/-/graphql-language-service-5.2.0.tgz", + "integrity": "sha512-o/ZgTS0pBxWm3hSF4+6GwiV1//DxzoLWEbS38+jqpzzy1d/QXBidwQuVYTOksclbtOJZ3KR/tZ8fi/tI6VpVMg==", "dependencies": { "nullthrows": "^1.0.0", "vscode-languageserver-types": "^3.17.1" @@ -2570,16 +3631,119 @@ "version": "4.0.0", "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", - "dev": true, "engines": { "node": ">=8" } }, + "node_modules/has-property-descriptors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz", + "integrity": "sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg==", + "dependencies": { + "get-intrinsic": "^1.2.2" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-proto": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/has-proto/-/has-proto-1.0.1.tgz", + "integrity": "sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/has-symbols": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.0.3.tgz", + "integrity": "sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A==", + "engines": { + "node": ">= 0.4" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/hasown": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.0.tgz", + "integrity": "sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==", + "dependencies": { + "function-bind": "^1.1.2" + }, + "engines": { + "node": ">= 0.4" + } + }, + "node_modules/hast-util-parse-selector": { + "version": "2.2.5", + "resolved": "https://registry.npmjs.org/hast-util-parse-selector/-/hast-util-parse-selector-2.2.5.tgz", + "integrity": "sha512-7j6mrk/qqkSehsM92wQjdIgWM2/BW61u/53G6xmC8i1OmEdKLHbk419QKQUjz6LglWsfqoiHmyMRkP1BGjecNQ==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, + "node_modules/hastscript": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/hastscript/-/hastscript-6.0.0.tgz", + "integrity": "sha512-nDM6bvd7lIqDUiYEiu5Sl/+6ReP0BMk/2f4U/Rooccxkj0P5nm+acM5PrGJ/t5I8qPGiqZSE6hVAwZEdZIvP4w==", + "dependencies": { + "@types/hast": "^2.0.0", + "comma-separated-tokens": "^1.0.0", + "hast-util-parse-selector": "^2.0.0", + "property-information": "^5.0.0", + "space-separated-tokens": "^1.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/unified" + } + }, "node_modules/hey-listen": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/hey-listen/-/hey-listen-1.0.8.tgz", "integrity": "sha512-COpmrF2NOg4TBWUJ5UVyaCU2A88wEMkUPK4hNqyCkqHbxT92BbvfjoSozkAIIm6XhicGlJHhFdullInrdhwU8Q==" }, + "node_modules/highlight.js": { + "version": "10.7.3", + "resolved": 
"https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", + "integrity": "sha512-tzcUFauisWKNHaRkN4Wjl/ZA07gENAjFl3J/c480dprkGTg5EQstgaNFqBfUqCq54kZRIEcreTsAgF/m2quD7A==", + "engines": { + "node": "*" + } + }, + "node_modules/hoist-non-react-statics": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz", + "integrity": "sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==", + "dependencies": { + "react-is": "^16.7.0" + } + }, + "node_modules/ieee754": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", + "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, "node_modules/ignore": { "version": "5.2.4", "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.2.4.tgz", @@ -2589,6 +3753,14 @@ "node": ">= 4" } }, + "node_modules/immutable": { + "version": "3.8.2", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-3.8.2.tgz", + "integrity": "sha512-15gZoQ38eYjEjxkorfbcgBKBL6R7T459OuK+CpcWt7O3KF4uPCx2tD0uFETlUDIyo+1789crbMhTvQBSR5yBMg==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/import-fresh": { "version": "3.3.0", "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", @@ -2618,7 +3790,6 @@ "version": "1.0.6", "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "dev": true, "dependencies": { "once": "^1.3.0", "wrappy": "1" @@ -2627,8 +3798,13 @@ "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", - "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", - "dev": true + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==" + }, + "node_modules/ini": { + "version": "1.3.8", + "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", + "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", + "optional": true }, "node_modules/invariant": { "version": "2.2.4", @@ -2638,6 +3814,51 @@ "loose-envify": "^1.0.0" } }, + "node_modules/is-alphabetical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphabetical/-/is-alphabetical-1.0.4.tgz", + "integrity": "sha512-DwzsA04LQ10FHTZuL0/grVDk4rFoVH1pjAToYwBrHSxcrBIGQuXrQMtD5U1b0U2XVgKZCTLLP8u2Qxqhy3l2Vg==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-alphanumerical": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-alphanumerical/-/is-alphanumerical-1.0.4.tgz", + "integrity": "sha512-UzoZUr+XfVz3t3v4KyGEniVL9BDRoQtY7tOyrRybkVNjDFWyo1yhXNGrrBTQxp3ib9BLAWs7k2YKBQsFRkZG9A==", + "dependencies": { + "is-alphabetical": "^1.0.0", + "is-decimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-decimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-decimal/-/is-decimal-1.0.4.tgz", + 
"integrity": "sha512-RGdriMmQQvZ2aqaQq3awNA6dCGtKpiDFcOzrTWrDAT2MiWrKQVPmxLGHl7Y2nNu6led0kEyoX0enY0qXYsv9zw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/is-docker": { + "version": "2.2.1", + "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", + "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", + "bin": { + "is-docker": "cli.js" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/is-extglob": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", @@ -2659,11 +3880,19 @@ "node": ">=0.10.0" } }, + "node_modules/is-hexadecimal": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/is-hexadecimal/-/is-hexadecimal-1.0.4.tgz", + "integrity": "sha512-gyPJuv83bHMpocVYoqof5VDiZveEoGoFL8m3BXNb2VW8Xs+rz9kqO8LOQ5DH6EsuvilT1ApazU0pyl+ytbPtlw==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, "engines": { "node": ">=0.12.0" } @@ -2696,11 +3925,21 @@ "node": ">=0.10.0" } }, + "node_modules/is-wsl": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", + "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", + "dependencies": { + "is-docker": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, "node_modules/isexe": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==" }, "node_modules/isobject": { "version": "3.0.1", @@ -2710,6 +3949,11 @@ "node": ">=0.10.0" } }, + "node_modules/js-file-download": { + "version": "0.4.12", + "resolved": "https://registry.npmjs.org/js-file-download/-/js-file-download-0.4.12.tgz", + "integrity": "sha512-rML+NkoD08p5Dllpjo0ffy4jRHeY6Zsapvr/W86N7E0yuzAO6qa5X9+xog6zQNlH102J7IXljNY2FtS6Lj3ucg==" + }, "node_modules/js-tokens": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz", @@ -2719,7 +3963,6 @@ "version": "4.1.0", "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", - "dev": true, "dependencies": { "argparse": "^2.0.1" }, @@ -2733,12 +3976,50 @@ "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", "dev": true }, + "node_modules/json-stable-stringify": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.0.2.tgz", + "integrity": "sha512-eunSSaEnxV12z+Z73y/j5N37/In40GK4GmsSy+tEHJMxknvqnA7/djeYtAgW0GsWHUfg+847WJjKaEylk2y09g==", + "dependencies": { + "jsonify": "^0.0.1" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/json-stable-stringify-without-jsonify": { "version": "1.0.1", "resolved": 
"https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", "dev": true }, + "node_modules/jsonfile": { + "version": "6.1.0", + "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.1.0.tgz", + "integrity": "sha512-5dgndWOriYSm5cnYaJNhalLNDKOqFwyDB/rr1E9ZsGciGvKPs8R2xYGCacuf3z6K1YKDz182fd+fY3cn3pMqXQ==", + "dependencies": { + "universalify": "^2.0.0" + }, + "optionalDependencies": { + "graceful-fs": "^4.1.6" + } + }, + "node_modules/jsonify": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.1.tgz", + "integrity": "sha512-2/Ki0GcmuqSrgFyelQq9M05y7PS0mEwuIzrf3f1fPqkVDVRvZrPZtVSMHxdgo8Aq0sxAOb/cr2aqqA3LeWHVPg==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/klaw-sync": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", + "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", + "dependencies": { + "graceful-fs": "^4.1.11" + } + }, "node_modules/levn": { "version": "0.4.1", "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", @@ -2775,6 +4056,16 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==" + }, + "node_modules/lodash.debounce": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/lodash.debounce/-/lodash.debounce-4.0.8.tgz", + "integrity": "sha512-FT1yDzDYEoYWhnSGnpE/4Kj1fLZkDFyqRb7fNt6FdYOSxlUWAtp42Eh6Wb0rGIv/m9Bgo7x4GhQbm5Ys4SG5ow==" + }, "node_modules/lodash.merge": { "version": "4.6.2", "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", @@ -2792,11 +4083,23 @@ "loose-envify": "cli.js" } }, + "node_modules/lowlight": { + "version": "1.20.0", + "resolved": "https://registry.npmjs.org/lowlight/-/lowlight-1.20.0.tgz", + "integrity": "sha512-8Ktj+prEb1RoCPkEOrPMYUN/nCggB7qAWe3a7OpMjWQkh3l2RD5wKRQ+o8Q8YuI9RG/xs95waaI/E6ym/7NsTw==", + "dependencies": { + "fault": "^1.0.0", + "highlight.js": "~10.7.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, "node_modules/lru-cache": { "version": "6.0.0", "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, "dependencies": { "yallist": "^4.0.0" }, @@ -2853,7 +4156,6 @@ "version": "4.0.5", "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.5.tgz", "integrity": "sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==", - "dev": true, "dependencies": { "braces": "^3.0.2", "picomatch": "^2.3.1" @@ -2862,11 +4164,52 @@ "node": ">=8.6" } }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": 
"sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mimic-response": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", + "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", + "optional": true, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/minim": { + "version": "0.23.8", + "resolved": "https://registry.npmjs.org/minim/-/minim-0.23.8.tgz", + "integrity": "sha512-bjdr2xW1dBCMsMGGsUeqM4eFI60m94+szhxWys+B1ztIt6gWSfeGBdSVCIawezeHYLYn0j6zrsXdQS/JllBzww==", + "dependencies": { + "lodash": "^4.15.0" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/minimatch": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, "dependencies": { "brace-expansion": "^1.1.7" }, @@ -2874,12 +4217,32 @@ "node": "*" } }, + "node_modules/minimist": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", + "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/mkdirp-classic": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", + "integrity": "sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", + "optional": true + }, "node_modules/ms": { "version": "2.1.2", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", "dev": true }, + "node_modules/nan": { + "version": "2.18.0", + "resolved": "https://registry.npmjs.org/nan/-/nan-2.18.0.tgz", + "integrity": "sha512-W7tfG7vMOGtD30sHoZSSc/JVYiyDPEyQVso/Zz+/uQd0B0L46gtC+pHha5FFMRpil6fm/AoEcRWyOVi4+E/f8w==", + "optional": true + }, "node_modules/nanoid": { "version": "3.3.6", "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.6.tgz", @@ -2898,26 +4261,113 @@ "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" } }, + "node_modules/napi-build-utils": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-1.0.2.tgz", + "integrity": "sha512-ONmRUqK7zj7DWX0D9ADe03wbwOBZxNAfF20PlGfCWQcD3+/MakShIHrMqx9YwPTfxDdF1zLeL+RGZiR9kGMLdg==", + "optional": true + }, "node_modules/natural-compare": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", "dev": true }, + "node_modules/node-abi": { + "version": "3.51.0", + "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.51.0.tgz", + "integrity": "sha512-SQkEP4hmNWjlniS5zdnfIXTk1x7Ome85RDzHlTbBtzE97Gfwz/Ipw4v/Ryk20DWIy3yCNVLVlGKApCnmvYoJbA==", + "optional": true, + "dependencies": { + "semver": "^7.3.5" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/node-abort-controller": { + "version": "3.1.1", + "resolved": 
"https://registry.npmjs.org/node-abort-controller/-/node-abort-controller-3.1.1.tgz", + "integrity": "sha512-AGK2yQKIjRuqnc6VkX2Xj5d+QW8xZ87pa1UK6yA6ouUyuxfHuMP6umE5QK7UmTeOAymo+Zx1Fxiuw9rVx8taHQ==" + }, + "node_modules/node-domexception": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz", + "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/jimmywarting" + }, + { + "type": "github", + "url": "https://paypal.me/jimmywarting" + } + ], + "engines": { + "node": ">=10.5.0" + } + }, + "node_modules/node-fetch-commonjs": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/node-fetch-commonjs/-/node-fetch-commonjs-3.3.2.tgz", + "integrity": "sha512-VBlAiynj3VMLrotgwOS3OyECFxas5y7ltLcK4t41lMUZeaK15Ym4QRkqN0EQKAFL42q9i21EPKjzLUPfltR72A==", + "dependencies": { + "node-domexception": "^1.0.0", + "web-streams-polyfill": "^3.0.3" + }, + "engines": { + "node": "^12.20.0 || ^14.13.1 || >=16.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/node-fetch" + } + }, "node_modules/nullthrows": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/nullthrows/-/nullthrows-1.1.1.tgz", "integrity": "sha512-2vPPEi+Z7WqML2jZYddDIfy5Dqb0r2fze2zTxNNknZaFpVHU3mFB3R+DWeJWGVx0ecvttSGlJTI+WG+8Z4cDWw==" }, + "node_modules/object-assign": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/object-assign/-/object-assign-4.1.1.tgz", + "integrity": "sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/object-inspect": { + "version": "1.13.1", + "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.1.tgz", + "integrity": "sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dev": true, "dependencies": { "wrappy": "1" } }, + "node_modules/open": { + "version": "7.4.2", + "resolved": "https://registry.npmjs.org/open/-/open-7.4.2.tgz", + "integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==", + "dependencies": { + "is-docker": "^2.0.0", + "is-wsl": "^2.1.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/optionator": { "version": "0.9.3", "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz", @@ -2935,6 +4385,14 @@ "node": ">= 0.8.0" } }, + "node_modules/os-tmpdir": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/os-tmpdir/-/os-tmpdir-1.0.2.tgz", + "integrity": "sha512-D2FR03Vir7FIu45XBY20mTb+/ZSWB00sjU9jdQXt83gDrI4Ztz5Fs7/yy74g2N5SVQY4xY1qDr4rNddwYRVX0g==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/p-limit": { "version": "3.1.0", "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", @@ -2977,6 +4435,71 @@ "node": ">=6" } }, + "node_modules/parse-entities": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/parse-entities/-/parse-entities-2.0.0.tgz", + "integrity": 
"sha512-kkywGpCcRYhqQIchaWqZ875wzpS/bMKhz5HnN3p7wveJTkTtyAB/AlnS0f8DFSqYW1T82t6yEAkEcB+A1I3MbQ==", + "dependencies": { + "character-entities": "^1.0.0", + "character-entities-legacy": "^1.0.0", + "character-reference-invalid": "^1.0.0", + "is-alphanumerical": "^1.0.0", + "is-decimal": "^1.0.0", + "is-hexadecimal": "^1.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/patch-package": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/patch-package/-/patch-package-8.0.0.tgz", + "integrity": "sha512-da8BVIhzjtgScwDJ2TtKsfT5JFWz1hYoBl9rUQ1f38MC2HwnEIkK8VN3dKMKcP7P7bvvgzNDbfNHtx3MsQb5vA==", + "dependencies": { + "@yarnpkg/lockfile": "^1.1.0", + "chalk": "^4.1.2", + "ci-info": "^3.7.0", + "cross-spawn": "^7.0.3", + "find-yarn-workspace-root": "^2.0.0", + "fs-extra": "^9.0.0", + "json-stable-stringify": "^1.0.2", + "klaw-sync": "^6.0.0", + "minimist": "^1.2.6", + "open": "^7.4.2", + "rimraf": "^2.6.3", + "semver": "^7.5.3", + "slash": "^2.0.0", + "tmp": "^0.0.33", + "yaml": "^2.2.2" + }, + "bin": { + "patch-package": "index.js" + }, + "engines": { + "node": ">=14", + "npm": ">5" + } + }, + "node_modules/patch-package/node_modules/rimraf": { + "version": "2.7.1", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-2.7.1.tgz", + "integrity": "sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + } + }, + "node_modules/patch-package/node_modules/slash": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", + "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", + "engines": { + "node": ">=6" + } + }, "node_modules/path-exists": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", @@ -2990,7 +4513,6 @@ "version": "1.0.1", "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "dev": true, "engines": { "node": ">=0.10.0" } @@ -2999,7 +4521,6 @@ "version": "3.1.1", "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, "engines": { "node": ">=8" } @@ -3023,7 +4544,6 @@ "version": "2.3.1", "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, "engines": { "node": ">=8.6" }, @@ -3042,15 +4562,10 @@ "tslib": "^2.1.0" } }, - "node_modules/popmotion/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/postcss": { - "version": "8.4.27", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.27.tgz", - "integrity": "sha512-gY/ACJtJPSmUFPDCHtX78+01fHa64FaU4zaaWfuh1MhGJISufJAH4cun6k/8fwsHYeK4UQmENQK+tRLCFJE8JQ==", + "version": "8.4.31", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.31.tgz", + "integrity": "sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==", "dev": true, 
"funding": [ { @@ -3075,6 +4590,32 @@ "node": "^10 || ^12 || >=14" } }, + "node_modules/prebuild-install": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.1.tgz", + "integrity": "sha512-jAXscXWMcCK8GgCoHOfIr0ODh5ai8mj63L2nWrjuAgXE6tDyYGnx4/8o/rCgU+B4JSyZBKbeZqzhtwtC3ovxjw==", + "optional": true, + "dependencies": { + "detect-libc": "^2.0.0", + "expand-template": "^2.0.3", + "github-from-package": "0.0.0", + "minimist": "^1.2.3", + "mkdirp-classic": "^0.5.3", + "napi-build-utils": "^1.0.1", + "node-abi": "^3.3.0", + "pump": "^3.0.0", + "rc": "^1.2.7", + "simple-get": "^4.0.0", + "tar-fs": "^2.0.0", + "tunnel-agent": "^0.6.0" + }, + "bin": { + "prebuild-install": "bin.js" + }, + "engines": { + "node": ">=10" + } + }, "node_modules/prelude-ls": { "version": "1.2.1", "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", @@ -3084,15 +4625,87 @@ "node": ">= 0.8.0" } }, + "node_modules/prismjs": { + "version": "1.29.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.29.0.tgz", + "integrity": "sha512-Kx/1w86q/epKcmte75LNrEoT+lX8pBpavuAbvJWRXar7Hz8jrtF+e3vY751p0R8H9HdArwaCTNDDzHg/ScJK1Q==", + "engines": { + "node": ">=6" + } + }, + "node_modules/process": { + "version": "0.11.10", + "resolved": "https://registry.npmjs.org/process/-/process-0.11.10.tgz", + "integrity": "sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==", + "engines": { + "node": ">= 0.6.0" + } + }, + "node_modules/prop-types": { + "version": "15.8.1", + "resolved": "https://registry.npmjs.org/prop-types/-/prop-types-15.8.1.tgz", + "integrity": "sha512-oj87CgZICdulUohogVAR7AjlC0327U4el4L6eAvOqCeudMDVU0NThNaV+b9Df4dXgSP1gXMTnPdhfe/2qDH5cg==", + "dependencies": { + "loose-envify": "^1.4.0", + "object-assign": "^4.1.1", + "react-is": "^16.13.1" + } + }, + "node_modules/property-information": { + "version": "5.6.0", + "resolved": "https://registry.npmjs.org/property-information/-/property-information-5.6.0.tgz", + "integrity": "sha512-YUHSPk+A30YPv+0Qf8i9Mbfe/C0hdPXk1s1jPVToV8pk8BQtpw10ct89Eo7OWkutrwqvT0eicAxlOg3dOAu8JA==", + "dependencies": { + "xtend": "^4.0.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" + }, + "node_modules/pump": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.0.tgz", + "integrity": "sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==", + "optional": true, + "dependencies": { + "end-of-stream": "^1.1.0", + "once": "^1.3.1" + } + }, "node_modules/punycode": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.0.tgz", - "integrity": "sha512-rRV+zQD8tVFys26lAGR9WUuS4iUAngJScM+ZRSKtvl5tKeZ2t5bvdNFdNHBW9FWR4guGHlgmsZ1G7BSm2wTbuA==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", "dev": true, "engines": { "node": ">=6" } }, + "node_modules/qs": { + "version": "6.11.2", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.11.2.tgz", + "integrity": 
"sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA==", + "dependencies": { + "side-channel": "^1.0.4" + }, + "engines": { + "node": ">=0.6" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/querystringify": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/querystringify/-/querystringify-2.2.0.tgz", + "integrity": "sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==" + }, "node_modules/queue-microtask": { "version": "1.2.3", "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", @@ -3113,6 +4726,74 @@ } ] }, + "node_modules/ramda": { + "version": "0.29.1", + "resolved": "https://registry.npmjs.org/ramda/-/ramda-0.29.1.tgz", + "integrity": "sha512-OfxIeWzd4xdUNxlWhgFazxsA/nl3mS4/jGZI5n00uWOoSSFRhC1b6gl6xvmzUamgmqELraWp0J/qqVlXYPDPyA==", + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/ramda" + } + }, + "node_modules/ramda-adjunct": { + "version": "4.1.1", + "resolved": "https://registry.npmjs.org/ramda-adjunct/-/ramda-adjunct-4.1.1.tgz", + "integrity": "sha512-BnCGsZybQZMDGram9y7RiryoRHS5uwx8YeGuUeDKuZuvK38XO6JJfmK85BwRWAKFA6pZ5nZBO/HBFtExVaf31w==", + "engines": { + "node": ">=0.10.3" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/ramda-adjunct" + }, + "peerDependencies": { + "ramda": ">= 0.29.0" + } + }, + "node_modules/randexp": { + "version": "0.5.3", + "resolved": "https://registry.npmjs.org/randexp/-/randexp-0.5.3.tgz", + "integrity": "sha512-U+5l2KrcMNOUPYvazA3h5ekF80FHTUG+87SEAmHZmolh1M+i/WyTCxVzmi+tidIa1tM4BSe8g2Y/D3loWDjj+w==", + "dependencies": { + "drange": "^1.0.2", + "ret": "^0.2.0" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/randombytes": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/randombytes/-/randombytes-2.1.0.tgz", + "integrity": "sha512-vYl3iOX+4CKUWuxGi9Ukhie6fsqXqS9FE2Zaic4tNFD2N2QQaXOMFbuKK4QmDHC0JO6B1Zp41J0LpT0oR68amQ==", + "dependencies": { + "safe-buffer": "^5.1.0" + } + }, + "node_modules/rc": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", + "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", + "optional": true, + "dependencies": { + "deep-extend": "^0.6.0", + "ini": "~1.3.0", + "minimist": "^1.2.0", + "strip-json-comments": "~2.0.1" + }, + "bin": { + "rc": "cli.js" + } + }, + "node_modules/rc/node_modules/strip-json-comments": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", + "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", + "optional": true, + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/react": { "version": "18.2.0", "resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz", @@ -3124,6 +4805,30 @@ "node": ">=0.10.0" } }, + "node_modules/react-copy-to-clipboard": { + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/react-copy-to-clipboard/-/react-copy-to-clipboard-5.1.0.tgz", + "integrity": "sha512-k61RsNgAayIJNoy9yDsYzDe/yAZAzEbEgcz3DZMhF686LEyukcE1hzurxe85JandPUG+yTfGVFzuEw3xt8WP/A==", + "dependencies": { + "copy-to-clipboard": "^3.3.1", + "prop-types": "^15.8.1" + }, + "peerDependencies": { + "react": "^15.3.0 || 16 || 17 || 18" + } + }, + "node_modules/react-debounce-input": { + "version": "3.3.0", + "resolved": 
"https://registry.npmjs.org/react-debounce-input/-/react-debounce-input-3.3.0.tgz", + "integrity": "sha512-VEqkvs8JvY/IIZvh71Z0TC+mdbxERvYF33RcebnodlsUZ8RSgyKe2VWaHXv4+/8aoOgXLxWrdsYs2hDhcwbUgA==", + "dependencies": { + "lodash.debounce": "^4", + "prop-types": "^15.8.1" + }, + "peerDependencies": { + "react": "^15.3.0 || 16 || 17 || 18" + } + }, "node_modules/react-dom": { "version": "18.2.0", "resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz", @@ -3136,21 +4841,83 @@ "react": "^18.2.0" } }, - "node_modules/react-hook-form": { - "version": "7.46.1", - "resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.46.1.tgz", - "integrity": "sha512-0GfI31LRTBd5tqbXMGXT1Rdsv3rnvy0FjEk8Gn9/4tp6+s77T7DPZuGEpBRXOauL+NhyGT5iaXzdIM2R6F/E+w==", - "engines": { - "node": ">=12.22.0" + "node_modules/react-immutable-proptypes": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/react-immutable-proptypes/-/react-immutable-proptypes-2.2.0.tgz", + "integrity": "sha512-Vf4gBsePlwdGvSZoLSBfd4HAP93HDauMY4fDjXhreg/vg6F3Fj/MXDNyTbltPC/xZKmZc+cjLu3598DdYK6sgQ==", + "dependencies": { + "invariant": "^2.2.2" }, - "funding": { - "type": "opencollective", - "url": "https://opencollective.com/react-hook-form" + "peerDependencies": { + "immutable": ">=3.6.2" + } + }, + "node_modules/react-immutable-pure-component": { + "version": "2.2.2", + "resolved": "https://registry.npmjs.org/react-immutable-pure-component/-/react-immutable-pure-component-2.2.2.tgz", + "integrity": "sha512-vkgoMJUDqHZfXXnjVlG3keCxSO/U6WeDQ5/Sl0GK2cH8TOxEzQ5jXqDXHEL/jqk6fsNxV05oH5kD7VNMUE2k+A==", + "peerDependencies": { + "immutable": ">= 2 || >= 4.0.0-rc", + "react": ">= 16.6", + "react-dom": ">= 16.6" + } + }, + "node_modules/react-inspector": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/react-inspector/-/react-inspector-6.0.2.tgz", + "integrity": "sha512-x+b7LxhmHXjHoU/VrFAzw5iutsILRoYyDq97EDYdFpPLcvqtEzk4ZSZSQjnFPbr5T57tLXnHcqFYoN1pI6u8uQ==", + "peerDependencies": { + "react": "^16.8.4 || ^17.0.0 || ^18.0.0" + } + }, + "node_modules/react-is": { + "version": "16.13.1", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-16.13.1.tgz", + "integrity": "sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==" + }, + "node_modules/react-redux": { + "version": "8.1.3", + "resolved": "https://registry.npmjs.org/react-redux/-/react-redux-8.1.3.tgz", + "integrity": "sha512-n0ZrutD7DaX/j9VscF+uTALI3oUPa/pO4Z3soOBIjuRn/FzVu6aehhysxZCLi6y7duMf52WNZGMl7CtuK5EnRw==", + "dependencies": { + "@babel/runtime": "^7.12.1", + "@types/hoist-non-react-statics": "^3.3.1", + "@types/use-sync-external-store": "^0.0.3", + "hoist-non-react-statics": "^3.3.2", + "react-is": "^18.0.0", + "use-sync-external-store": "^1.0.0" }, "peerDependencies": { - "react": "^16.8.0 || ^17 || ^18" + "@types/react": "^16.8 || ^17.0 || ^18.0", + "@types/react-dom": "^16.8 || ^17.0 || ^18.0", + "react": "^16.8 || ^17.0 || ^18.0", + "react-dom": "^16.8 || ^17.0 || ^18.0", + "react-native": ">=0.59", + "redux": "^4 || ^5.0.0-beta.0" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + }, + "react-dom": { + "optional": true + }, + "react-native": { + "optional": true + }, + "redux": { + "optional": true + } } }, + "node_modules/react-redux/node_modules/react-is": { + "version": "18.2.0", + "resolved": "https://registry.npmjs.org/react-is/-/react-is-18.2.0.tgz", + "integrity": 
"sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w==" + }, "node_modules/react-remove-scroll": { "version": "2.5.5", "resolved": "https://registry.npmjs.org/react-remove-scroll/-/react-remove-scroll-2.5.5.tgz", @@ -3196,16 +4963,6 @@ } } }, - "node_modules/react-remove-scroll-bar/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, - "node_modules/react-remove-scroll/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/react-style-singleton": { "version": "2.2.1", "resolved": "https://registry.npmjs.org/react-style-singleton/-/react-style-singleton-2.2.1.tgz", @@ -3228,15 +4985,118 @@ } } }, - "node_modules/react-style-singleton/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" + "node_modules/react-syntax-highlighter": { + "version": "15.5.0", + "resolved": "https://registry.npmjs.org/react-syntax-highlighter/-/react-syntax-highlighter-15.5.0.tgz", + "integrity": "sha512-+zq2myprEnQmH5yw6Gqc8lD55QHnpKaU8TOcFeC/Lg/MQSs8UknEA0JC4nTZGFAXC2J2Hyj/ijJ7NlabyPi2gg==", + "dependencies": { + "@babel/runtime": "^7.3.1", + "highlight.js": "^10.4.1", + "lowlight": "^1.17.0", + "prismjs": "^1.27.0", + "refractor": "^3.6.0" + }, + "peerDependencies": { + "react": ">= 0.14.0" + } + }, + "node_modules/readable-stream": { + "version": "3.6.2", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", + "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", + "optional": true, + "dependencies": { + "inherits": "^2.0.3", + "string_decoder": "^1.1.1", + "util-deprecate": "^1.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/redux": { + "version": "4.2.1", + "resolved": "https://registry.npmjs.org/redux/-/redux-4.2.1.tgz", + "integrity": "sha512-LAUYz4lc+Do8/g7aeRa8JkyDErK6ekstQaqWQrNRW//MY1TvCEpMtpTWvlQ+FPbWCx+Xixu/6SHt5N0HR+SB4w==", + "dependencies": { + "@babel/runtime": "^7.9.2" + } + }, + "node_modules/redux-immutable": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/redux-immutable/-/redux-immutable-4.0.0.tgz", + "integrity": "sha512-SchSn/DWfGb3oAejd+1hhHx01xUoxY+V7TeK0BKqpkLKiQPVFf7DYzEaKmrEVxsWxielKfSK9/Xq66YyxgR1cg==", + "peerDependencies": { + "immutable": "^3.8.1 || ^4.0.0-rc.1" + } + }, + "node_modules/refractor": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/refractor/-/refractor-3.6.0.tgz", + "integrity": "sha512-MY9W41IOWxxk31o+YvFCNyNzdkc9M20NoZK5vq6jkv4I/uh2zkWcfudj0Q1fovjUQJrNewS9NMzeTtqPf+n5EA==", + "dependencies": { + "hastscript": "^6.0.0", + "parse-entities": "^2.0.0", + "prismjs": "~1.27.0" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/refractor/node_modules/prismjs": { + "version": "1.27.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.27.0.tgz", + "integrity": "sha512-t13BGPUlFDR7wRB5kQDG4jjl7XeuH6jbJGt11JHPL96qwsEHNX2+68tFXqc1/k+/jALsbSWJKUOT/hcYAZ5LkA==", + "engines": { + "node": 
">=6" + } }, "node_modules/regenerator-runtime": { - "version": "0.13.11", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.11.tgz", - "integrity": "sha512-kY1AZVr2Ra+t+piVaJ4gxaFaReZVH40AKNo7UCX6W+dEwBo/2oZJzqfuN1qLq1oL45o56cPaTXELwrTh8Fpggg==" + "version": "0.14.0", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.14.0.tgz", + "integrity": "sha512-srw17NI0TUWHuGa5CFGGmhfNIeja30WMBfbslPNhf6JrqQlLN5gcrvig1oqPxiVaXb0oW0XRKtH6Nngs5lKCIA==" + }, + "node_modules/remarkable": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/remarkable/-/remarkable-2.0.1.tgz", + "integrity": "sha512-YJyMcOH5lrR+kZdmB0aJJ4+93bEojRZ1HGDn9Eagu6ibg7aVZhc3OWbbShRid+Q5eAfsEqWxpe+g5W5nYNfNiA==", + "dependencies": { + "argparse": "^1.0.10", + "autolinker": "^3.11.0" + }, + "bin": { + "remarkable": "bin/remarkable.js" + }, + "engines": { + "node": ">= 6.0.0" + } + }, + "node_modules/remarkable/node_modules/argparse": { + "version": "1.0.10", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-1.0.10.tgz", + "integrity": "sha512-o5Roy6tNG4SL/FOkCAN6RzjiakZS25RLYFrcMttJqbdd8BWrnA+fGz57iN5Pb06pvBGvl5gQ0B48dJlslXvoTg==", + "dependencies": { + "sprintf-js": "~1.0.2" + } + }, + "node_modules/repeat-string": { + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/repeat-string/-/repeat-string-1.6.1.tgz", + "integrity": "sha512-PV0dzCYDNfRi1jCDbJzpW7jNNDRuCOG/jI5ctQcGKt/clZD+YcPS3yIlWuTJMmESC8aevCFmWJy5wjAFgNqN6w==", + "engines": { + "node": ">=0.10" + } + }, + "node_modules/requires-port": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", + "integrity": "sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==" + }, + "node_modules/reselect": { + "version": "4.1.8", + "resolved": "https://registry.npmjs.org/reselect/-/reselect-4.1.8.tgz", + "integrity": "sha512-ab9EmR80F/zQTMNeneUr4cv+jSwPJgIlvEmVwLerwrWVbpLlBuls9XHzIeTFy4cegU2NHBp3va0LKOzU5qFEYQ==" }, "node_modules/resolve-from": { "version": "4.0.0", @@ -3247,6 +5107,14 @@ "node": ">=4" } }, + "node_modules/ret": { + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/ret/-/ret-0.2.2.tgz", + "integrity": "sha512-M0b3YWQs7R3Z917WRQy1HHA7Ba7D8hvZg6UE5mLykJxQVE2ju0IXbGlaHPPlkY+WN7wFP+wUMXmBFA0aV6vYGQ==", + "engines": { + "node": ">=4" + } + }, "node_modules/reusify": { "version": "1.0.4", "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", @@ -3311,6 +5179,25 @@ "queue-microtask": "^1.2.2" } }, + "node_modules/safe-buffer": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", + "integrity": "sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ] + }, "node_modules/scheduler": { "version": "0.23.0", "resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz", @@ -3323,7 +5210,6 @@ "version": "7.5.4", "resolved": "https://registry.npmjs.org/semver/-/semver-7.5.4.tgz", "integrity": "sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==", - "dev": true, "dependencies": { "lru-cache": "^6.0.0" }, @@ -3334,6 +5220,34 @@ "node": ">=10" } }, + 
"node_modules/serialize-error": { + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-8.1.0.tgz", + "integrity": "sha512-3NnuWfM6vBYoy5gZFvHiYsVbafvI9vZv/+jlIigFn4oP4zjNPK3LhcY0xSCgeb1a5L8jO71Mit9LlNoi2UfDDQ==", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/set-function-length": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.1.1.tgz", + "integrity": "sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ==", + "dependencies": { + "define-data-property": "^1.1.1", + "get-intrinsic": "^1.2.1", + "gopd": "^1.0.1", + "has-property-descriptors": "^1.0.0" + }, + "engines": { + "node": ">= 0.4" + } + }, "node_modules/set-value": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/set-value/-/set-value-4.1.0.tgz", @@ -3351,11 +5265,22 @@ "node": ">=11.0" } }, + "node_modules/sha.js": { + "version": "2.4.11", + "resolved": "https://registry.npmjs.org/sha.js/-/sha.js-2.4.11.tgz", + "integrity": "sha512-QMEp5B7cftE7APOjk5Y6xgrbWu+WkLVQwk8JNjZ8nKRciZaByEW6MubieAiToS7+dwvrjGhH8jRXz3MVd0AYqQ==", + "dependencies": { + "inherits": "^2.0.1", + "safe-buffer": "^5.0.1" + }, + "bin": { + "sha.js": "bin.js" + } + }, "node_modules/shebang-command": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, "dependencies": { "shebang-regex": "^3.0.0" }, @@ -3367,11 +5292,77 @@ "version": "3.0.0", "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, "engines": { "node": ">=8" } }, + "node_modules/short-unique-id": { + "version": "5.0.3", + "resolved": "https://registry.npmjs.org/short-unique-id/-/short-unique-id-5.0.3.tgz", + "integrity": "sha512-yhniEILouC0s4lpH0h7rJsfylZdca10W9mDJRAFh3EpcSUanCHGb0R7kcFOIUCZYSAPo0PUD5ZxWQdW0T4xaug==", + "bin": { + "short-unique-id": "bin/short-unique-id", + "suid": "bin/short-unique-id" + } + }, + "node_modules/side-channel": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/side-channel/-/side-channel-1.0.4.tgz", + "integrity": "sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw==", + "dependencies": { + "call-bind": "^1.0.0", + "get-intrinsic": "^1.0.2", + "object-inspect": "^1.9.0" + }, + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/simple-concat": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", + "integrity": "sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "optional": true + }, + "node_modules/simple-get": { + "version": "4.0.1", + "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", + "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", + 
"funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "optional": true, + "dependencies": { + "decompress-response": "^6.0.0", + "once": "^1.3.1", + "simple-concat": "^1.0.0" + } + }, "node_modules/slash": { "version": "3.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", @@ -3390,6 +5381,34 @@ "node": ">=0.10.0" } }, + "node_modules/space-separated-tokens": { + "version": "1.1.5", + "resolved": "https://registry.npmjs.org/space-separated-tokens/-/space-separated-tokens-1.1.5.tgz", + "integrity": "sha512-q/JSVd1Lptzhf5bkYm4ob4iWPjx0KiRe3sRFBNrVqbJkFaBm5vbbowy1mymoPNLRa52+oadOhJ+K49wsSeSjTA==", + "funding": { + "type": "github", + "url": "https://github.com/sponsors/wooorm" + } + }, + "node_modules/sprintf-js": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz", + "integrity": "sha512-D9cPgkvLlV3t3IzL0D0YLvGA9Ahk4PcvVwUbN0dSGr1aP0Nrt4AEnTUbuGvquEC0mA64Gqt1fzirlRs5ibXx8g==" + }, + "node_modules/stampit": { + "version": "4.3.2", + "resolved": "https://registry.npmjs.org/stampit/-/stampit-4.3.2.tgz", + "integrity": "sha512-pE2org1+ZWQBnIxRPrBM2gVupkuDD0TTNIo1H6GdT/vO82NXli2z8lRE8cu/nBIHrcOCXFBAHpb9ZldrB2/qOA==" + }, + "node_modules/string_decoder": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", + "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", + "optional": true, + "dependencies": { + "safe-buffer": "~5.2.0" + } + }, "node_modules/strip-ansi": { "version": "6.0.1", "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", @@ -3415,9 +5434,9 @@ } }, "node_modules/style-mod": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.0.3.tgz", - "integrity": "sha512-78Jv8kYJdjbvRwwijtCevYADfsI0lGzYJe4mMFdceO8l75DFFDoqBhR1jVDicDRRaX4//g1u9wKeo+ztc2h1Rw==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.0.tgz", + "integrity": "sha512-Ca5ib8HrFn+f+0n4N4ScTIA9iTOQ7MaGS1ylHcoVqW9J7w2w8PzN6g9gKmTYgGEBH8e120+RCmhpje6jC5uGWA==", "peer": true }, "node_modules/style-value-types": { @@ -3429,16 +5448,10 @@ "tslib": "^2.1.0" } }, - "node_modules/style-value-types/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/supports-color": { "version": "7.2.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", - "dev": true, "dependencies": { "has-flag": "^4.0.0" }, @@ -3446,17 +5459,131 @@ "node": ">=8" } }, + "node_modules/swagger-client": { + "version": "3.24.5", + "resolved": "https://registry.npmjs.org/swagger-client/-/swagger-client-3.24.5.tgz", + "integrity": "sha512-qb4Rr9LpWs7o2AO4KdiIK+dz0GbrRLyD+UyN24h6AcNcDUnwfkb6LgFE4e6bXwVXWJzMp27w1QvSQ4hQNMPnoQ==", + "dependencies": { + "@babel/runtime-corejs3": "^7.22.15", + "@swagger-api/apidom-core": ">=0.83.0 <1.0.0", + "@swagger-api/apidom-error": ">=0.83.0 <1.0.0", + "@swagger-api/apidom-json-pointer": ">=0.83.0 <1.0.0", + "@swagger-api/apidom-ns-openapi-3-1": 
">=0.83.0 <1.0.0", + "@swagger-api/apidom-reference": ">=0.83.0 <1.0.0", + "cookie": "~0.5.0", + "deepmerge": "~4.3.0", + "fast-json-patch": "^3.0.0-1", + "is-plain-object": "^5.0.0", + "js-yaml": "^4.1.0", + "node-abort-controller": "^3.1.1", + "node-fetch-commonjs": "^3.3.1", + "qs": "^6.10.2", + "traverse": "~0.6.6", + "undici": "^5.24.0" + } + }, + "node_modules/swagger-client/node_modules/is-plain-object": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/is-plain-object/-/is-plain-object-5.0.0.tgz", + "integrity": "sha512-VRSzKkbMm5jMDoKLbltAkFQ5Qr7VDiTFGXxYFXXowVj387GeGNOCsOH6Msy00SGZ3Fp84b1Naa1psqgcCIEP5Q==", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/swagger-ui-react": { + "version": "5.9.4", + "resolved": "https://registry.npmjs.org/swagger-ui-react/-/swagger-ui-react-5.9.4.tgz", + "integrity": "sha512-VEY+QuNtRkidR/Os81zq22TpbXkfFDJ2pB6UF+J5sgrf2zksPr2oQGADpWn6RkYXXudZcalXUjcUqgx8WgIfaA==", + "dependencies": { + "@babel/runtime-corejs3": "^7.23.2", + "@braintree/sanitize-url": "=6.0.4", + "base64-js": "^1.5.1", + "classnames": "^2.3.1", + "css.escape": "1.5.1", + "deep-extend": "0.6.0", + "dompurify": "=3.0.6", + "ieee754": "^1.2.1", + "immutable": "^3.x.x", + "js-file-download": "^0.4.12", + "js-yaml": "=4.1.0", + "lodash": "^4.17.21", + "patch-package": "^8.0.0", + "prop-types": "^15.8.1", + "randexp": "^0.5.3", + "randombytes": "^2.1.0", + "react-copy-to-clipboard": "5.1.0", + "react-debounce-input": "=3.3.0", + "react-immutable-proptypes": "2.2.0", + "react-immutable-pure-component": "^2.2.0", + "react-inspector": "^6.0.1", + "react-redux": "^8.1.3", + "react-syntax-highlighter": "^15.5.0", + "redux": "^4.1.2", + "redux-immutable": "^4.0.0", + "remarkable": "^2.0.1", + "reselect": "^4.1.8", + "serialize-error": "^8.1.0", + "sha.js": "^2.4.11", + "swagger-client": "^3.24.5", + "url-parse": "^1.5.10", + "xml": "=1.0.1", + "xml-but-prettier": "^1.0.1", + "zenscroll": "^4.0.2" + }, + "peerDependencies": { + "react": ">=17.0.0", + "react-dom": ">=17.0.0" + } + }, + "node_modules/tar-fs": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.1.tgz", + "integrity": "sha512-V0r2Y9scmbDRLCNex/+hYzvp/zyYjvFbHPNgVTKfQvVrb6guiE/fxP+XblDNR011utopbkex2nM4dHNV6GDsng==", + "optional": true, + "dependencies": { + "chownr": "^1.1.1", + "mkdirp-classic": "^0.5.2", + "pump": "^3.0.0", + "tar-stream": "^2.1.4" + } + }, + "node_modules/tar-stream": { + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", + "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", + "optional": true, + "dependencies": { + "bl": "^4.0.3", + "end-of-stream": "^1.4.1", + "fs-constants": "^1.0.0", + "inherits": "^2.0.3", + "readable-stream": "^3.1.1" + }, + "engines": { + "node": ">=6" + } + }, "node_modules/text-table": { "version": "0.2.0", "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", "dev": true }, + "node_modules/tmp": { + "version": "0.0.33", + "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.0.33.tgz", + "integrity": "sha512-jRCJlojKnZ3addtTOjdIqoRuPEKBvNXcGYqzO6zWZX8KfKEpnGY5jfggJQ3EjKuu8D4bJRr0y+cYJFmYbImXGw==", + "dependencies": { + "os-tmpdir": "~1.0.2" + }, + "engines": { + "node": ">=0.6.0" + } + }, "node_modules/to-regex-range": { "version": "5.0.1", "resolved": 
"https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, "dependencies": { "is-number": "^7.0.0" }, @@ -3469,6 +5596,45 @@ "resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz", "integrity": "sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ==" }, + "node_modules/traverse": { + "version": "0.6.7", + "resolved": "https://registry.npmjs.org/traverse/-/traverse-0.6.7.tgz", + "integrity": "sha512-/y956gpUo9ZNCb99YjxG7OaslxZWHfCHAUUfshwqOXmxUIvqLjVO581BT+gM59+QV9tFe6/CGG53tsA1Y7RSdg==", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/tree-sitter": { + "version": "0.20.4", + "resolved": "https://registry.npmjs.org/tree-sitter/-/tree-sitter-0.20.4.tgz", + "integrity": "sha512-rjfR5dc4knG3jnJNN/giJ9WOoN1zL/kZyrS0ILh+eqq8RNcIbiXA63JsMEgluug0aNvfQvK4BfCErN1vIzvKog==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "nan": "^2.17.0", + "prebuild-install": "^7.1.1" + } + }, + "node_modules/tree-sitter-json": { + "version": "0.20.1", + "resolved": "https://registry.npmjs.org/tree-sitter-json/-/tree-sitter-json-0.20.1.tgz", + "integrity": "sha512-482hf7J+aBwhksSw8yWaqI8nyP1DrSwnS4IMBShsnkFWD3SE8oalHnsEik59fEVi3orcTCUtMzSjZx+0Tpa6Vw==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "nan": "^2.18.0" + } + }, + "node_modules/tree-sitter-yaml": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/tree-sitter-yaml/-/tree-sitter-yaml-0.5.0.tgz", + "integrity": "sha512-POJ4ZNXXSWIG/W4Rjuyg36MkUD4d769YRUGKRqN+sVaj/VCo6Dh6Pkssn1Rtewd5kybx+jT1BWMyWN0CijXnMA==", + "hasInstallScript": true, + "optional": true, + "dependencies": { + "nan": "^2.14.0" + } + }, "node_modules/ts-api-utils": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.0.1.tgz", @@ -3481,6 +5647,28 @@ "typescript": ">=4.2.0" } }, + "node_modules/ts-toolbelt": { + "version": "9.6.0", + "resolved": "https://registry.npmjs.org/ts-toolbelt/-/ts-toolbelt-9.6.0.tgz", + "integrity": "sha512-nsZd8ZeNUzukXPlJmTBwUAuABDe/9qtVDelJeT/qW0ow3ZS3BsQJtNkan1802aM9Uf68/Y8ljw86Hu0h5IUW3w==" + }, + "node_modules/tslib": { + "version": "2.6.2", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.2.tgz", + "integrity": "sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==" + }, + "node_modules/tunnel-agent": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", + "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", + "optional": true, + "dependencies": { + "safe-buffer": "^5.0.1" + }, + "engines": { + "node": "*" + } + }, "node_modules/type-check": { "version": "0.4.0", "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", @@ -3497,7 +5685,6 @@ "version": "0.20.2", "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", - "dev": true, "engines": { "node": ">=10" }, @@ -3505,6 +5692,14 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/types-ramda": { + "version": "0.29.5", + "resolved": "https://registry.npmjs.org/types-ramda/-/types-ramda-0.29.5.tgz", + "integrity": 
"sha512-u+bAYXHDPJR+amB0qMrMU/NXRB2PG8QqpO2v6j7yK/0mPZhlaaZj++ynYjnVpkPEpCkZEGxNpWY3X7qyLCGE3w==", + "dependencies": { + "ts-toolbelt": "^9.6.0" + } + }, "node_modules/typescript": { "version": "5.2.2", "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.2.2.tgz", @@ -3523,6 +5718,30 @@ "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==" }, + "node_modules/undici": { + "version": "5.27.2", + "resolved": "https://registry.npmjs.org/undici/-/undici-5.27.2.tgz", + "integrity": "sha512-iS857PdOEy/y3wlM3yRp+6SNQQ6xU0mmZcwRSriqk+et/cwWAtwmIGf6WkoDN2EK/AMdCO/dfXzIwi+rFMrjjQ==", + "dependencies": { + "@fastify/busboy": "^2.0.0" + }, + "engines": { + "node": ">=14.0" + } + }, + "node_modules/universalify": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.0.tgz", + "integrity": "sha512-hAZsKq7Yy11Zu1DE0OzWjw7nnLZmJZYTDZZyEFHZdUhV8FkH5MCfoU1XMaxXovpyW5nq5scPqq0ZDP9Zyl04oQ==", + "engines": { + "node": ">= 10.0.0" + } + }, + "node_modules/unraw": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/unraw/-/unraw-3.0.0.tgz", + "integrity": "sha512-08/DA66UF65OlpUDIQtbJyrqTR0jTAlJ+jsnkQ4jxR7+K5g5YG1APZKQSMCE1vqqmD+2pv6+IdEjmopFatacvg==" + }, "node_modules/uri-js": { "version": "4.4.1", "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", @@ -3532,6 +5751,15 @@ "punycode": "^2.1.0" } }, + "node_modules/url-parse": { + "version": "1.5.10", + "resolved": "https://registry.npmjs.org/url-parse/-/url-parse-1.5.10.tgz", + "integrity": "sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==", + "dependencies": { + "querystringify": "^2.1.1", + "requires-port": "^1.0.0" + } + }, "node_modules/use-callback-ref": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/use-callback-ref/-/use-callback-ref-1.3.0.tgz", @@ -3552,11 +5780,6 @@ } } }, - "node_modules/use-callback-ref/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/use-sidecar": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/use-sidecar/-/use-sidecar-1.1.2.tgz", @@ -3578,11 +5801,6 @@ } } }, - "node_modules/use-sidecar/node_modules/tslib": { - "version": "2.6.1", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.1.tgz", - "integrity": "sha512-t0hLfiEKfMUoqhG+U1oid7Pva4bbDPHYfJNiB7BiIjRkj1pyC++4N3huJfqY6aRH6VTB0rvtzQwjM4K6qpfOig==" - }, "node_modules/use-sync-external-store": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/use-sync-external-store/-/use-sync-external-store-1.2.0.tgz", @@ -3591,10 +5809,16 @@ "react": "^16.8.0 || ^17.0.0 || ^18.0.0" } }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "optional": true + }, "node_modules/vite": { - "version": "4.4.9", - "resolved": "https://registry.npmjs.org/vite/-/vite-4.4.9.tgz", - "integrity": "sha512-2mbUn2LlUmNASWwSCNSJ/EG2HuSRTnVNaydp6vMCm5VIqJsjMfbIWtbH2kDuwUVW5mMUKKZvGPX/rqeqVvv1XA==", + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/vite/-/vite-4.5.0.tgz", + "integrity": 
"sha512-ulr8rNLA6rkyFAlVWw2q5YJ91v098AFQ2R0PRFwPzREXOUJQPtFUG0t+/ZikhaOCDqFoDhN6/v8Sq0o4araFAw==", "dev": true, "dependencies": { "esbuild": "^0.18.10", @@ -3647,9 +5871,9 @@ } }, "node_modules/vscode-languageserver-types": { - "version": "3.17.3", - "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.3.tgz", - "integrity": "sha512-SYU4z1dL0PyIMd4Vj8YOqFvHu7Hz/enbWtpfnVbJHU4Nd1YNYx8u0ennumc6h48GQNeOLxmwySmnADouT/AuZA==" + "version": "3.17.5", + "resolved": "https://registry.npmjs.org/vscode-languageserver-types/-/vscode-languageserver-types-3.17.5.tgz", + "integrity": "sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==" }, "node_modules/w3c-keyname": { "version": "2.2.8", @@ -3657,11 +5881,24 @@ "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==", "peer": true }, + "node_modules/web-streams-polyfill": { + "version": "3.2.1", + "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.2.1.tgz", + "integrity": "sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==", + "engines": { + "node": ">= 8" + } + }, + "node_modules/web-tree-sitter": { + "version": "0.20.3", + "resolved": "https://registry.npmjs.org/web-tree-sitter/-/web-tree-sitter-0.20.3.tgz", + "integrity": "sha512-zKGJW9r23y3BcJusbgvnOH2OYAW40MXAOi9bi3Gcc7T4Gms9WWgXF8m6adsJWpGJEhgOzCrfiz1IzKowJWrtYw==", + "optional": true + }, "node_modules/which": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, "dependencies": { "isexe": "^2.0.0" }, @@ -3675,14 +5912,41 @@ "node_modules/wrappy": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" + }, + "node_modules/xml": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/xml/-/xml-1.0.1.tgz", + "integrity": "sha512-huCv9IH9Tcf95zuYCsQraZtWnJvBtLVE0QHMOs8bWyZAFZNDcYjsPq1nEx8jKA9y+Beo9v+7OBPRisQTjinQMw==" + }, + "node_modules/xml-but-prettier": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/xml-but-prettier/-/xml-but-prettier-1.0.1.tgz", + "integrity": "sha512-C2CJaadHrZTqESlH03WOyw0oZTtoy2uEg6dSDF6YRg+9GnYNub53RRemLpnvtbHDFelxMx4LajiFsYeR6XJHgQ==", + "dependencies": { + "repeat-string": "^1.5.2" + } + }, + "node_modules/xtend": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/xtend/-/xtend-4.0.2.tgz", + "integrity": "sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==", + "engines": { + "node": ">=0.4" + } }, "node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true + "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==" + }, + "node_modules/yaml": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.3.tgz", + "integrity": 
"sha512-zw0VAJxgeZ6+++/su5AFoqBbZbrEakwu+X0M5HmcwUiBL7AzcuPKjj5we4xfQLp78LkEMpD0cOnUhmgOVy3KdQ==", + "engines": { + "node": ">= 14" + } }, "node_modules/yocto-queue": { "version": "0.1.0", @@ -3695,6 +5959,11 @@ "funding": { "url": "https://github.com/sponsors/sindresorhus" } + }, + "node_modules/zenscroll": { + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/zenscroll/-/zenscroll-4.0.2.tgz", + "integrity": "sha512-jEA1znR7b4C/NnaycInCU6h/d15ZzCd1jmsruqOKnZP6WXQSMH3W2GL+OXbkruslU4h+Tzuos0HdswzRUk/Vgg==" } } } diff --git a/playground/package.json b/playground/package.json index d211df0704..7e9aacc0e6 100644 --- a/playground/package.json +++ b/playground/package.json @@ -10,24 +10,23 @@ "preview": "vite preview" }, "dependencies": { - "@tanstack/react-query": "^4.35.3", - "fast-json-patch": "^3.1.1", - "graphiql": "^3.0.5", - "graphql": "^16.8.0", + "graphiql": "^3.0.9", + "graphql": "^16.8.1", "react": "^18.2.0", "react-dom": "^18.2.0", - "react-hook-form": "^7.46.1" + "swagger-ui-react": "^5.9.4" }, "devDependencies": { - "@types/react": "^18.2.21", - "@types/react-dom": "^18.2.7", - "@typescript-eslint/eslint-plugin": "^6.7.0", - "@typescript-eslint/parser": "^6.7.0", - "@vitejs/plugin-react-swc": "^3.0.0", - "eslint": "^8.49.0", + "@types/react": "^18.2.37", + "@types/react-dom": "^18.2.15", + "@types/swagger-ui-react": "^4.18.1", + "@typescript-eslint/eslint-plugin": "^6.10.0", + "@typescript-eslint/parser": "^6.11.0", + "@vitejs/plugin-react-swc": "^3.4.1", + "eslint": "^8.53.0", "eslint-plugin-react-hooks": "^4.6.0", - "eslint-plugin-react-refresh": "^0.4.3", + "eslint-plugin-react-refresh": "^0.4.4", "typescript": "^5.2.2", - "vite": "^4.4.9" + "vite": "^4.5.0" } } diff --git a/playground/src/App.tsx b/playground/src/App.tsx index dc00b98cbc..cb2fa035f3 100644 --- a/playground/src/App.tsx +++ b/playground/src/App.tsx @@ -8,28 +8,29 @@ // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. +import React from 'react' import { GraphiQL } from 'graphiql' import { createGraphiQLFetcher } from '@graphiql/toolkit' import { GraphiQLPlugin } from '@graphiql/react' -import { QueryClient, QueryClientProvider } from '@tanstack/react-query' -import { Plugin } from './components/Plugin' +import 'swagger-ui-react/swagger-ui.css' import 'graphiql/graphiql.css' -const client = new QueryClient() -const fetcher = createGraphiQLFetcher({ url: 'http://localhost:9181/api/v0/graphql' }) +const baseUrl = import.meta.env.DEV ? 'http://localhost:9181' : '' +const SwaggerUI = React.lazy(() => import('swagger-ui-react')) +const fetcher = createGraphiQLFetcher({ url: `${baseUrl}/api/v0/graphql` }) const plugin: GraphiQLPlugin = { - title: 'DefraDB', - icon: () => (DB), - content: () => (), + title: 'DefraDB API', + icon: () => (API), + content: () => ( + + + + ), } function App() { - return ( - - - - ) + return () } export default App diff --git a/playground/src/components/Plugin.tsx b/playground/src/components/Plugin.tsx deleted file mode 100644 index e8c727fe61..0000000000 --- a/playground/src/components/Plugin.tsx +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -import { useQuery } from '@tanstack/react-query' -import { SchemaLoadForm } from './SchemaLoadForm' -import { SchemaPatchForm } from './SchemaPatchForm' -import { listSchema } from '../lib/api' - -const defaultFieldTypes = [ - 'ID', - 'Boolean', - '[Boolean]', - '[Boolean!]', - 'Int', - '[Int]', - '[Int!]', - 'DateTime', - 'Float', - '[Float]', - '[Float!]', - 'String', - '[String]', - '[String!]', -] - -export function Plugin() { - const { data } = useQuery({ queryKey: ['schemas'], queryFn: listSchema }) - - const collections = data?.data?.collections ?? [] - const schemaFieldTypes = collections.map(col => [`${col.name}`, `[${col.name}]`]).flat() - const fieldTypes = [...defaultFieldTypes, ...schemaFieldTypes] - - return ( - - DefraDB - - - Add Schema - - - { collections?.map((schema) => - - {schema.name} Schema - - - )} - - - ) -} \ No newline at end of file diff --git a/playground/src/components/SchemaLoadForm.tsx b/playground/src/components/SchemaLoadForm.tsx deleted file mode 100644 index a1df44d87c..0000000000 --- a/playground/src/components/SchemaLoadForm.tsx +++ /dev/null @@ -1,81 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -import { useState, useEffect } from 'react' -import { useForm } from 'react-hook-form' -import { useSchemaContext } from '@graphiql/react' -import { useQueryClient } from '@tanstack/react-query' -import { loadSchema, ErrorItem } from '../lib/api' - -export type FormData = { - schema: string -} - -const defaultValues: FormData = { - schema: '', -} - -export function SchemaLoadForm() { - const queryClient = useQueryClient() - const schemaContext = useSchemaContext({ nonNull: true }) - - const { formState, reset, register, handleSubmit } = useForm({ defaultValues }) - - const [errors, setErrors] = useState() - const [isLoading, setIsLoading] = useState(false) - - useEffect(() => { - if (formState.isSubmitSuccessful) reset(defaultValues) - }, [formState, reset]) - - const onSubmit = async (data: FormData) => { - setErrors(undefined) - setIsLoading(true) - - try { - const res = await loadSchema(data.schema) - if (res.errors) { - setErrors(res.errors) - } else { - schemaContext.introspect() - queryClient.invalidateQueries(['schemas']) - } - } catch(err: any) { - setErrors([{ message: err.message }]) - } finally { - setIsLoading(false) - } - } - - return ( - - {errors?.map((error, index) => - - {error.message} - - )} - - - Submit - - - ) -} \ No newline at end of file diff --git a/playground/src/components/SchemaPatchForm.tsx b/playground/src/components/SchemaPatchForm.tsx deleted file mode 100644 index d832cb8bec..0000000000 --- a/playground/src/components/SchemaPatchForm.tsx +++ /dev/null @@ -1,117 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -import { useState } from 'react' -import { useForm, useFieldArray } from 'react-hook-form' -import { useSchemaContext } from '@graphiql/react' -import { useQueryClient } from '@tanstack/react-query' -import { patchSchema, Field, ErrorItem } from '../lib/api' - -export type FormData = { - name: string - fields: Field[] -} - -export type SchemaPatchFormProps = { - values?: FormData - fieldTypes: string[] -} - -export function SchemaPatchForm({ values, fieldTypes }: SchemaPatchFormProps) { - const queryClient = useQueryClient() - const schemaContext = useSchemaContext({ nonNull: true }) - - const [errors, setErrors] = useState() - const [isLoading, setIsLoading] = useState(false) - - const { control, register, handleSubmit } = useForm({ values }) - const { fields, append, remove } = useFieldArray({ control, name: 'fields', keyName: '_id' }) - - const onSubmit = async (data: FormData) => { - setErrors(undefined) - setIsLoading(true) - - try { - const res = await patchSchema(values!.name, values!.fields, data.name, data.fields) - if (res.errors) { - setErrors(res.errors) - } else { - schemaContext.introspect() - queryClient.invalidateQueries(['schemas']) - } - } catch(err: any) { - setErrors([{ message: err.message }]) - } finally { - setIsLoading(false) - } - } - - return ( - - {errors?.map((error, index) => - - {error.message} - - )} - - Fields - append({ name: '', kind: 'String', internal: false })} - > - Add - - - {fields.map((field, index) => - - - - {fieldTypes.map((value, index) => - {value} - )} - - {!field.id && - remove(index)} - disabled={isLoading || !!field.id} - > - Remove - - } - - )} - - Submit - - - ) -} \ No newline at end of file diff --git a/playground/src/index.css b/playground/src/index.css index 78177e137f..2077a4fef5 100644 --- a/playground/src/index.css +++ b/playground/src/index.css @@ -18,86 +18,51 @@ html, body, #root { height: 100vh; } -.graphiql-defradb-plugin { - display: flex; - flex-direction: column; - gap: 16px; +.swagger-ui .opblock .opblock-summary { + overflow-x: hidden; } -.graphiql-defradb-header { - margin-top: 0; - font-size: var(--font-size-h2); - font-weight: var(--font-weight-medium); +.swagger-ui .opblock .opblock-summary-path { + color: hsla(var(--color-neutral), var(--alpha-secondary)); + text-overflow: clip; + white-space: nowrap; + max-width: none; } -.graphiql-defradb-subheader { - margin: 0; - font-size: var(--font-size-h3); - font-weight: var(--font-weight-medium); +.swagger-ui .scheme-container { + background-color: hsl(var(--color-base)); + box-shadow: none; } -.graphiql-defradb-input-label { - margin: 0; - font-size: var(--font-size-h5); - font-weight: var(--font-weight-medium); -} - -.graphiql-defradb-error { - color: rgb(255, 93, 93); - border: 2px solid rgb(255, 93, 93); - border-radius: var(--border-radius-4); - padding: var(--px-8); -} - -.graphiql-defradb-input { - height: 30px; - width: 100%; - - font-size: var(--font-size-body); - color: var(--color-neutral); - - background-color: var(--color-primary); - border: 2px solid hsla(var(--color-neutral), var(--alpha-background-light)); - border-radius: var(--border-radius-4); +.swagger-ui .opblock-tag { + border-bottom: 1px solid hsla(var(--color-neutral), var(--alpha-secondary)); } -.graphiql-defradb-textarea { - min-height: 300px; - width: 100%; - resize: vertical; - - font-size: var(--font-size-body); - color: var(--color-neutral); - - background-color: var(--color-primary); - border: 2px solid hsla(var(--color-neutral), var(--alpha-background-light)); - border-radius: 
var(--border-radius-4); -} - -.graphiql-defradb-load-form { - display: flex; - flex-direction: column; - gap: var(--px-8); -} - -.graphiql-defradb-form { - border-radius: var(--border-radius-4); - border: 2px solid hsla(var(--color-neutral), var(--alpha-background-light)); - padding: var(--px-16); - - display: flex; - flex-direction: column; - gap: var(--px-8); +.swagger-ui, +.swagger-ui .opblock-tag, +.swagger-ui .info .title, +.swagger-ui .opblock-description-wrapper p, +.swagger-ui .opblock .opblock-section-header h4, +.swagger-ui .btn, +.swagger-ui .opblock .opblock-section-header, +.swagger-ui .tab li button.tablinks, +.swagger-ui .model-title, +.swagger-ui .model, +.swagger-ui .response-col_status, +.swagger-ui table thead tr td, +.swagger-ui .response-col_links, +.swagger-ui section.models h4, +.swagger-ui .responses-inner h4, +.swagger-ui .responses-inner h5, +.swagger-ui .renderedMarkdown p { + color: hsla(var(--color-neutral), var(--alpha-secondary)); } -.graphiql-defradb-field-header { - display: flex; - align-items: center; - justify-content: space-between; +.swagger-ui .opblock-tag small { + white-space: nowrap; + overflow-x: hidden; } -.graphiql-defradb-field { - display: flex; - align-items: center; - gap: var(--px-8); +.swagger-ui .opblock .opblock-section-header { + background-color: hsl(var(--color-base)); } diff --git a/playground/src/lib/api.ts b/playground/src/lib/api.ts deleted file mode 100644 index cb8bb07e85..0000000000 --- a/playground/src/lib/api.ts +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -import { compare } from 'fast-json-patch' - -export type Extensions = { - status: number - httpError: string - stack?: string -} - -export type ErrorItem = { - message: string - extensions?: Extensions -} - -export type Field = { - id?: string - name: string - kind: string - internal: boolean -} - -export type Collection = { - id: string - name: string -} - -export type CollectionWithFields = Collection & { - fields: Field[] -} - -export type Response = { - data: T - errors?: ErrorItem[] -} - -export type ListSchemaResponse = Response<{ - collections?: CollectionWithFields[] -}> - -export type LoadSchemaResponse = Response<{ - result?: string - collections?: Collection[] -}> - -export type PatchSchemaResponse = Response<{ - result?: string -}> - -const baseUrl = import.meta.env.DEV ? 
'http://localhost:9181/api/v0' : '/api/v0' - -export async function listSchema(): Promise { - return fetch(baseUrl + '/schema').then(res => res.json()) -} - -export async function loadSchema(schema: string): Promise { - return fetch(baseUrl + '/schema', { method: 'POST', body: schema }).then(res => res.json()) -} - -export async function patchSchema(nameA: string, fieldsA: Field[], nameB: string, fieldsB: Field[]): Promise { - const schemaA = { Name: nameA, Fields: fieldsA.map(field => ({ Name: field.name, Kind: field.kind })) } - const schemaB = { Name: nameB, Fields: fieldsB.map(field => ({ Name: field.name, Kind: field.kind })) } - - const collectionA = { [nameA]: { Name: nameA, Schema: schemaA } } - const collectionB = { [nameB]: { Name: nameB, Schema: schemaB } } - - const body = JSON.stringify(compare(collectionA, collectionB)) - return fetch(baseUrl + '/schema', { method: 'PATCH', body }).then(res => res.json()) -} diff --git a/request/graphql/parser.go b/request/graphql/parser.go index ddd13d9e62..310673c347 100644 --- a/request/graphql/parser.go +++ b/request/graphql/parser.go @@ -13,10 +13,10 @@ package graphql import ( "context" - gql "github.com/graphql-go/graphql" - "github.com/graphql-go/graphql/language/ast" - gqlp "github.com/graphql-go/graphql/language/parser" - "github.com/graphql-go/graphql/language/source" + gql "github.com/sourcenetwork/graphql-go" + "github.com/sourcenetwork/graphql-go/language/ast" + gqlp "github.com/sourcenetwork/graphql-go/language/parser" + "github.com/sourcenetwork/graphql-go/language/source" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" @@ -104,13 +104,13 @@ func (p *parser) Parse(ast *ast.Document) (*request.Request, []error) { } func (p *parser) ParseSDL(ctx context.Context, schemaString string) ( - []client.CollectionDescription, + []client.CollectionDefinition, error, ) { return schema.FromString(ctx, schemaString) } -func (p *parser) SetSchema(ctx context.Context, txn datastore.Txn, collections []client.CollectionDescription) error { +func (p *parser) SetSchema(ctx context.Context, txn datastore.Txn, collections []client.CollectionDefinition) error { schemaManager, err := schema.NewSchemaManager() if err != nil { return err diff --git a/request/graphql/parser/commit.go b/request/graphql/parser/commit.go index 836fd9d167..8c9d3e47b5 100644 --- a/request/graphql/parser/commit.go +++ b/request/graphql/parser/commit.go @@ -13,8 +13,8 @@ package parser import ( "strconv" - gql "github.com/graphql-go/graphql" - "github.com/graphql-go/graphql/language/ast" + gql "github.com/sourcenetwork/graphql-go" + "github.com/sourcenetwork/graphql-go/language/ast" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client/request" diff --git a/request/graphql/parser/filter.go b/request/graphql/parser/filter.go index 46119070b4..16994e5a84 100644 --- a/request/graphql/parser/filter.go +++ b/request/graphql/parser/filter.go @@ -14,10 +14,10 @@ import ( "strconv" "strings" - gql "github.com/graphql-go/graphql" - "github.com/graphql-go/graphql/language/ast" - gqlp "github.com/graphql-go/graphql/language/parser" - gqls "github.com/graphql-go/graphql/language/source" + gql "github.com/sourcenetwork/graphql-go" + "github.com/sourcenetwork/graphql-go/language/ast" + gqlp "github.com/sourcenetwork/graphql-go/language/parser" + gqls "github.com/sourcenetwork/graphql-go/language/source" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" diff --git a/request/graphql/parser/introspection.go 
b/request/graphql/parser/introspection.go index babcce93d8..6c280d78d3 100644 --- a/request/graphql/parser/introspection.go +++ b/request/graphql/parser/introspection.go @@ -11,8 +11,8 @@ package parser import ( - gql "github.com/graphql-go/graphql" - "github.com/graphql-go/graphql/language/ast" + gql "github.com/sourcenetwork/graphql-go" + "github.com/sourcenetwork/graphql-go/language/ast" ) // IsIntrospectionQuery parses a root ast.Document and determines if it is an diff --git a/request/graphql/parser/mutation.go b/request/graphql/parser/mutation.go index c388fa1533..37dea7290b 100644 --- a/request/graphql/parser/mutation.go +++ b/request/graphql/parser/mutation.go @@ -13,8 +13,8 @@ package parser import ( "strings" - gql "github.com/graphql-go/graphql" - "github.com/graphql-go/graphql/language/ast" + gql "github.com/sourcenetwork/graphql-go" + "github.com/sourcenetwork/graphql-go/language/ast" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" diff --git a/request/graphql/parser/query.go b/request/graphql/parser/query.go index 9e17b15955..c76bde7b32 100644 --- a/request/graphql/parser/query.go +++ b/request/graphql/parser/query.go @@ -13,8 +13,8 @@ package parser import ( "strconv" - gql "github.com/graphql-go/graphql" - "github.com/graphql-go/graphql/language/ast" + gql "github.com/sourcenetwork/graphql-go" + "github.com/sourcenetwork/graphql-go/language/ast" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" diff --git a/request/graphql/parser/request.go b/request/graphql/parser/request.go index c7d7c36140..fca63bcac5 100644 --- a/request/graphql/parser/request.go +++ b/request/graphql/parser/request.go @@ -11,8 +11,8 @@ package parser import ( - gql "github.com/graphql-go/graphql" - "github.com/graphql-go/graphql/language/ast" + gql "github.com/sourcenetwork/graphql-go" + "github.com/sourcenetwork/graphql-go/language/ast" "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" diff --git a/request/graphql/parser/subscription.go b/request/graphql/parser/subscription.go index fb27b070f2..354645beb4 100644 --- a/request/graphql/parser/subscription.go +++ b/request/graphql/parser/subscription.go @@ -11,8 +11,8 @@ package parser import ( - gql "github.com/graphql-go/graphql" - "github.com/graphql-go/graphql/language/ast" + gql "github.com/sourcenetwork/graphql-go" + "github.com/sourcenetwork/graphql-go/language/ast" "github.com/sourcenetwork/defradb/client/request" ) diff --git a/request/graphql/schema/collection.go b/request/graphql/schema/collection.go index 00287c4454..15a6283acb 100644 --- a/request/graphql/schema/collection.go +++ b/request/graphql/schema/collection.go @@ -19,14 +19,14 @@ import ( "github.com/sourcenetwork/defradb/client/request" "github.com/sourcenetwork/defradb/request/graphql/schema/types" - "github.com/graphql-go/graphql/language/ast" - gqlp "github.com/graphql-go/graphql/language/parser" - "github.com/graphql-go/graphql/language/source" + "github.com/sourcenetwork/graphql-go/language/ast" + gqlp "github.com/sourcenetwork/graphql-go/language/parser" + "github.com/sourcenetwork/graphql-go/language/source" ) // FromString parses a GQL SDL string into a set of collection descriptions. 
func FromString(ctx context.Context, schemaString string) ( - []client.CollectionDescription, + []client.CollectionDefinition, error, ) { source := source.NewSource(&source.Source{ @@ -47,11 +47,11 @@ func FromString(ctx context.Context, schemaString string) ( // fromAst parses a GQL AST into a set of collection descriptions. func fromAst(ctx context.Context, doc *ast.Document) ( - []client.CollectionDescription, + []client.CollectionDefinition, error, ) { relationManager := NewRelationManager() - descriptions := []client.CollectionDescription{} + definitions := []client.CollectionDefinition{} for _, def := range doc.Definitions { switch defType := def.(type) { @@ -61,7 +61,7 @@ func fromAst(ctx context.Context, doc *ast.Document) ( return nil, err } - descriptions = append(descriptions, description) + definitions = append(definitions, description) default: // Do nothing, ignore it and continue @@ -72,12 +72,12 @@ func fromAst(ctx context.Context, doc *ast.Document) ( // The details on the relations between objects depend on both sides // of the relationship. The relation manager handles this, and must be applied // after all the collections have been processed. - err := finalizeRelations(relationManager, descriptions) + err := finalizeRelations(relationManager, definitions) if err != nil { return nil, err } - return descriptions, nil + return definitions, nil } // fromAstDefinition parses a AST object definition into a set of collection descriptions. @@ -85,7 +85,7 @@ func fromAstDefinition( ctx context.Context, relationManager *RelationManager, def *ast.ObjectDefinition, -) (client.CollectionDescription, error) { +) (client.CollectionDefinition, error) { fieldDescriptions := []client.FieldDescription{ { Name: request.KeyFieldName, @@ -98,7 +98,7 @@ func fromAstDefinition( for _, field := range def.Fields { tmpFieldsDescriptions, err := fieldsFromAST(field, relationManager, def) if err != nil { - return client.CollectionDescription{}, err + return client.CollectionDefinition{}, err } fieldDescriptions = append(fieldDescriptions, tmpFieldsDescriptions...) 
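The refactor threaded through collection.go replaces the flat CollectionDescription return with a CollectionDefinition that pairs collection-level metadata with the document schema. A minimal sketch of the shape these hunks imply; the stand-in types below are simplified assumptions, not the real client package definitions:

```go
package sketch

// Minimal stand-ins for the client types referenced in this diff;
// the real definitions in the client package carry more fields.
type IndexDescription struct{ Name string }

type FieldDescription struct{ Name string }

type CollectionDescription struct {
	Name    string
	Indexes []IndexDescription
}

type SchemaDescription struct {
	Name   string
	Fields []FieldDescription
}

// CollectionDefinition pairs collection-level metadata with the
// schema of its documents, replacing the previous flat description.
type CollectionDefinition struct {
	Description CollectionDescription
	Schema      SchemaDescription
}

// collectionName shows the access-pattern change: callers that
// previously read desc.Name now read def.Description.Name, while
// field lookups stay on def.Schema.Fields.
func collectionName(def CollectionDefinition) string {
	return def.Description.Name
}
```

The split presumably lets collection-level metadata such as indexes change without touching the schema that defines document fields.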
@@ -107,7 +107,7 @@ func fromAstDefinition( if directive.Name.Value == types.IndexDirectiveLabel { index, err := fieldIndexFromAST(field, directive) if err != nil { - return client.CollectionDescription{}, err + return client.CollectionDefinition{}, err } indexDescriptions = append(indexDescriptions, index) } @@ -129,19 +129,21 @@ func fromAstDefinition( if directive.Name.Value == types.IndexDirectiveLabel { index, err := indexFromAST(directive) if err != nil { - return client.CollectionDescription{}, err + return client.CollectionDefinition{}, err } indexDescriptions = append(indexDescriptions, index) } } - return client.CollectionDescription{ - Name: def.Name.Value, + return client.CollectionDefinition{ + Description: client.CollectionDescription{ + Name: def.Name.Value, + Indexes: indexDescriptions, + }, Schema: client.SchemaDescription{ Name: def.Name.Value, Fields: fieldDescriptions, }, - Indexes: indexDescriptions, }, nil } @@ -424,9 +426,9 @@ func getRelationshipName( return genRelationName(hostName, targetName) } -func finalizeRelations(relationManager *RelationManager, descriptions []client.CollectionDescription) error { - for _, description := range descriptions { - for i, field := range description.Schema.Fields { +func finalizeRelations(relationManager *RelationManager, definitions []client.CollectionDefinition) error { + for _, definition := range definitions { + for i, field := range definition.Schema.Fields { if field.RelationType == 0 || field.RelationType&client.Relation_Type_INTERNAL_ID != 0 { continue } @@ -447,7 +449,7 @@ func finalizeRelations(relationManager *RelationManager, descriptions []client.C } field.RelationType = rel.Kind() | fieldRelationType - description.Schema.Fields[i] = field + definition.Schema.Fields[i] = field } } diff --git a/request/graphql/schema/descriptions.go b/request/graphql/schema/descriptions.go index 6f967046b9..1aabee729e 100644 --- a/request/graphql/schema/descriptions.go +++ b/request/graphql/schema/descriptions.go @@ -11,7 +11,7 @@ package schema import ( - gql "github.com/graphql-go/graphql" + gql "github.com/sourcenetwork/graphql-go" "github.com/sourcenetwork/defradb/client" ) diff --git a/request/graphql/schema/descriptions_test.go b/request/graphql/schema/descriptions_test.go index 2ce5e55dc9..2368b58c27 100644 --- a/request/graphql/schema/descriptions_test.go +++ b/request/graphql/schema/descriptions_test.go @@ -30,9 +30,12 @@ func TestSingleSimpleType(t *testing.T) { verified: Boolean } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "User", + Description: client.CollectionDescription{ + Name: "User", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "User", Fields: []client.FieldDescription{ @@ -58,7 +61,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -77,9 +79,12 @@ func TestSingleSimpleType(t *testing.T) { rating: Float } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "User", + Description: client.CollectionDescription{ + Name: "User", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "User", Fields: []client.FieldDescription{ @@ -105,10 +110,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: 
client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -134,7 +141,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -153,9 +159,12 @@ func TestSingleSimpleType(t *testing.T) { published: Book } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "Book", + Description: client.CollectionDescription{ + Name: "Book", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Book", Fields: []client.FieldDescription{ @@ -190,10 +199,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -228,7 +239,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -247,9 +257,12 @@ func TestSingleSimpleType(t *testing.T) { rating: Float } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "User", + Description: client.CollectionDescription{ + Name: "User", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "User", Fields: []client.FieldDescription{ @@ -275,10 +288,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -304,7 +319,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -323,9 +337,12 @@ func TestSingleSimpleType(t *testing.T) { published: Book @relation(name:"book_authors") } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "Book", + Description: client.CollectionDescription{ + Name: "Book", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Book", Fields: []client.FieldDescription{ @@ -360,10 +377,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -398,7 +417,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -417,9 +435,12 @@ func TestSingleSimpleType(t *testing.T) { published: Book } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "Book", + Description: client.CollectionDescription{ + Name: "Book", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Book", Fields: []client.FieldDescription{ @@ -454,10 +475,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -492,7 +515,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, 
}, @@ -511,9 +533,12 @@ func TestSingleSimpleType(t *testing.T) { published: [Book] } `, - targetDescs: []client.CollectionDescription{ + targetDescs: []client.CollectionDefinition{ { - Name: "Book", + Description: client.CollectionDescription{ + Name: "Book", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Book", Fields: []client.FieldDescription{ @@ -548,10 +573,12 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, { - Name: "Author", + Description: client.CollectionDescription{ + Name: "Author", + Indexes: []client.IndexDescription{}, + }, Schema: client.SchemaDescription{ Name: "Author", Fields: []client.FieldDescription{ @@ -580,7 +607,6 @@ func TestSingleSimpleType(t *testing.T) { }, }, }, - Indexes: []client.IndexDescription{}, }, }, }, @@ -599,12 +625,12 @@ func runCreateDescriptionTest(t *testing.T, testcase descriptionTestCase) { assert.Equal(t, len(descs), len(testcase.targetDescs), testcase.description) for i, d := range descs { - assert.Equal(t, testcase.targetDescs[i], d, testcase.description) + assert.Equal(t, testcase.targetDescs[i].Description, d.Description, testcase.description) } } type descriptionTestCase struct { description string sdl string - targetDescs []client.CollectionDescription + targetDescs []client.CollectionDefinition } diff --git a/request/graphql/schema/generate.go b/request/graphql/schema/generate.go index e30693b3de..0d1f40efe1 100644 --- a/request/graphql/schema/generate.go +++ b/request/graphql/schema/generate.go @@ -14,7 +14,7 @@ import ( "context" "fmt" - gql "github.com/graphql-go/graphql" + gql "github.com/sourcenetwork/graphql-go" "github.com/sourcenetwork/defradb/client" @@ -47,7 +47,7 @@ func (m *SchemaManager) NewGenerator() *Generator { // Generate generates the query-op and mutation-op type definitions from // the given CollectionDescriptions. -func (g *Generator) Generate(ctx context.Context, collections []client.CollectionDescription) ([]*gql.Object, error) { +func (g *Generator) Generate(ctx context.Context, collections []client.CollectionDefinition) ([]*gql.Object, error) { typeMapBeforeMutation := g.manager.schema.TypeMap() typesBeforeMutation := make(map[string]any, len(typeMapBeforeMutation)) @@ -79,7 +79,7 @@ func (g *Generator) Generate(ctx context.Context, collections []client.Collectio // generate generates the query-op and mutation-op type definitions from // the given CollectionDescriptions. 
-func (g *Generator) generate(ctx context.Context, collections []client.CollectionDescription) ([]*gql.Object, error) { +func (g *Generator) generate(ctx context.Context, collections []client.CollectionDefinition) ([]*gql.Object, error) { // build base types defs, err := g.buildTypes(ctx, collections) if err != nil { @@ -354,7 +354,7 @@ func (g *Generator) createExpandedFieldList( // extract and return the correct gql.Object type(s) func (g *Generator) buildTypes( ctx context.Context, - collections []client.CollectionDescription, + collections []client.CollectionDefinition, ) ([]*gql.Object, error) { // @todo: Check for duplicate named defined types in the TypeMap // get all the defined types from the AST @@ -367,12 +367,12 @@ func (g *Generator) buildTypes( fieldDescriptions := collection.Schema.Fields // check if type exists - if _, ok := g.manager.schema.TypeMap()[collection.Name]; ok { - return nil, NewErrSchemaTypeAlreadyExist(collection.Name) + if _, ok := g.manager.schema.TypeMap()[collection.Description.Name]; ok { + return nil, NewErrSchemaTypeAlreadyExist(collection.Description.Name) } objconf := gql.ObjectConfig{ - Name: collection.Name, + Name: collection.Description.Name, } // Wrap field definition in a thunk so we can @@ -435,9 +435,9 @@ func (g *Generator) buildTypes( Type: gql.Boolean, } - gqlType, ok := g.manager.schema.TypeMap()[collection.Name] + gqlType, ok := g.manager.schema.TypeMap()[collection.Description.Name] if !ok { - return nil, NewErrObjectNotFoundDuringThunk(collection.Name) + return nil, NewErrObjectNotFoundDuringThunk(collection.Description.Name) } fields[request.GroupFieldName] = &gql.Field{ diff --git a/request/graphql/schema/index_test.go b/request/graphql/schema/index_test.go index 379b84647d..155a17fbf6 100644 --- a/request/graphql/schema/index_test.go +++ b/request/graphql/schema/index_test.go @@ -276,9 +276,9 @@ func parseIndexAndTest(t *testing.T, testCase indexTestCase) { cols, err := FromString(ctx, testCase.sdl) assert.NoError(t, err, testCase.description) assert.Equal(t, len(cols), 1, testCase.description) - assert.Equal(t, len(cols[0].Indexes), len(testCase.targetDescriptions), testCase.description) + assert.Equal(t, len(cols[0].Description.Indexes), len(testCase.targetDescriptions), testCase.description) - for i, d := range cols[0].Indexes { + for i, d := range cols[0].Description.Indexes { assert.Equal(t, testCase.targetDescriptions[i], d, testCase.description) } } diff --git a/request/graphql/schema/manager.go b/request/graphql/schema/manager.go index 76a5441d70..0f96ec2a29 100644 --- a/request/graphql/schema/manager.go +++ b/request/graphql/schema/manager.go @@ -11,7 +11,7 @@ package schema import ( - gql "github.com/graphql-go/graphql" + gql "github.com/sourcenetwork/graphql-go" schemaTypes "github.com/sourcenetwork/defradb/request/graphql/schema/types" ) @@ -53,7 +53,7 @@ func (s *SchemaManager) Schema() *gql.Schema { func (s *SchemaManager) ResolveTypes() error { // basically, this function just refreshes the // schema.TypeMap, and runs the internal - // typeMapReducer (https://github.com/graphql-go/graphql/blob/v0.7.9/schema.go#L275) + // typeMapReducer (https://github.com/sourcenetwork/graphql-go/blob/v0.7.9/schema.go#L275) // which ensures all the necessary types are defined in the // typeMap, and if there are any outstanding Thunks/closures // resolve them. 
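With both the parser (FromString/ParseSDL) and the generator updated, the SDL pipeline now carries []client.CollectionDefinition end to end. A sketch of a caller, using only the signatures visible in this diff:

```go
package sketch

import (
	"context"

	"github.com/sourcenetwork/defradb/request/graphql/schema"
)

// buildTypesFromSDL sketches the updated pipeline: schema.FromString
// now yields []client.CollectionDefinition, which Generator.Generate
// consumes directly. Only signatures shown in this diff are assumed.
func buildTypesFromSDL(ctx context.Context, sdl string) error {
	defs, err := schema.FromString(ctx, sdl)
	if err != nil {
		return err
	}

	manager, err := schema.NewSchemaManager()
	if err != nil {
		return err
	}

	// Generate registers the query-op and mutation-op types for the
	// parsed definitions on the manager's schema.
	_, err = manager.NewGenerator().Generate(ctx, defs)
	return err
}
```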
diff --git a/request/graphql/schema/types/base.go b/request/graphql/schema/types/base.go index 6a48e91349..b348a564f8 100644 --- a/request/graphql/schema/types/base.go +++ b/request/graphql/schema/types/base.go @@ -11,7 +11,7 @@ package types import ( - gql "github.com/graphql-go/graphql" + gql "github.com/sourcenetwork/graphql-go" ) // BooleanOperatorBlock filter block for boolean types. diff --git a/request/graphql/schema/types/commits.go b/request/graphql/schema/types/commits.go index 44a13b637f..4da8d2dd3a 100644 --- a/request/graphql/schema/types/commits.go +++ b/request/graphql/schema/types/commits.go @@ -11,7 +11,7 @@ package types import ( - gql "github.com/graphql-go/graphql" + gql "github.com/sourcenetwork/graphql-go" "github.com/sourcenetwork/defradb/client/request" ) diff --git a/request/graphql/schema/types/types.go b/request/graphql/schema/types/types.go index 75f91fb2c5..c28ef566ea 100644 --- a/request/graphql/schema/types/types.go +++ b/request/graphql/schema/types/types.go @@ -11,7 +11,7 @@ package types import ( - gql "github.com/graphql-go/graphql" + gql "github.com/sourcenetwork/graphql-go" ) const ( diff --git a/tests/bench/bench_util.go b/tests/bench/bench_util.go index 4ffe998d88..0a9127d816 100644 --- a/tests/bench/bench_util.go +++ b/tests/bench/bench_util.go @@ -18,8 +18,8 @@ import ( "sync" "testing" - "github.com/dgraph-io/badger/v4" ds "github.com/ipfs/go-datastore" + "github.com/sourcenetwork/badger/v4" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/errors" @@ -85,7 +85,7 @@ func ConstructSchema(fixture fixtures.Generator) (string, error) { // loop to get the schemas for i := 0; i < numTypes; i++ { - gql, err := fixtures.ExtractGQLFromType(fixture.Types()[i]) + gql, err := fixture.ExtractGQLFromType(fixture.Types()[i]) if err != nil { return "", errors.Wrap("failed generating GQL", err) } diff --git a/tests/bench/collection/utils.go b/tests/bench/collection/utils.go index 68df9531ed..dfb63fc86b 100644 --- a/tests/bench/collection/utils.go +++ b/tests/bench/collection/utils.go @@ -38,7 +38,7 @@ func runCollectionBenchGet( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() dockeys, err := benchutils.BackfillBenchmarkDB( b, @@ -123,7 +123,7 @@ func runCollectionBenchCreate( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() _, err = benchutils.BackfillBenchmarkDB(b, ctx, collections, fixture, docCount, opCount, doSync) if err != nil { @@ -149,7 +149,7 @@ func runCollectionBenchCreateMany( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() _, err = benchutils.BackfillBenchmarkDB(b, ctx, collections, fixture, docCount, opCount, doSync) if err != nil { diff --git a/tests/bench/fixtures/fixtures.go b/tests/bench/fixtures/fixtures.go index 7b19b58f68..65ecf94e22 100644 --- a/tests/bench/fixtures/fixtures.go +++ b/tests/bench/fixtures/fixtures.go @@ -16,6 +16,7 @@ import ( "encoding/json" "fmt" "reflect" + "strings" "github.com/bxcodec/faker" @@ -28,19 +29,41 @@ var ( } ) +type Option func(*Generator) + +func OptionFieldDirective(typeName, field, directive string) Option { + return func(g *Generator) { + if g.directives == nil { + g.directives = make(map[string]map[string][]string) + } + if g.directives[typeName] == nil { + g.directives[typeName] = make(map[string][]string) + } + g.directives[typeName][field] = append(g.directives[typeName][field], directive) + } +} + type Generator struct { ctx context.Context schema string types []any + // map of type name to field name to list of 
directives + directives map[string]map[string][]string } -func ForSchema(ctx context.Context, schemaName string) Generator { - return Generator{ +func ForSchema(ctx context.Context, schemaName string, options ...Option) Generator { + g := Generator{ ctx: ctx, schema: schemaName, types: registeredFixtures[schemaName], } + + for _, o := range options { + o(&g) + } + + return g } // Types returns the defined types for this fixture set @@ -85,7 +108,7 @@ func (g Generator) GenerateDocs() ([]string, error) { // extractGQLFromType extracts a GraphQL SDL definition as a string // from a given type struct -func ExtractGQLFromType(t any) (string, error) { +func (g Generator) ExtractGQLFromType(t any) (string, error) { var buf bytes.Buffer if reflect.TypeOf(t).Kind() != reflect.Struct { @@ -104,7 +127,17 @@ func ExtractGQLFromType(t any) (string, error) { fname := f.Name ftype := f.Type.Name() gqlType := gTypeToGQLType[ftype] - fmt.Fprintf(&buf, "\t%s: %s\n", fname, gqlType) + + directives := "" + if g.directives != nil { + if dirsMap, ok := g.directives[name]; ok { + if dirs, ok := dirsMap[fname]; ok { + directives = " " + strings.Join(dirs, " ") + } + } + } + // write field's name, type and directives + fmt.Fprintf(&buf, "\t%s: %s%s\n", fname, gqlType, directives) } fmt.Fprint(&buf, "}") diff --git a/tests/bench/query/index/simple_test.go b/tests/bench/query/index/simple_test.go new file mode 100644 index 0000000000..e675086a2a --- /dev/null +++ b/tests/bench/query/index/simple_test.go @@ -0,0 +1,97 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
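The fixtures package now accepts functional options, and OptionFieldDirective threads schema directives into the generated SDL; the new index benchmarks below use it to attach @index to a field. A usage sketch, assuming the user_simple fixture registers a User struct with an Age field (as those benchmarks suggest):

```go
package sketch

import (
	"context"
	"fmt"

	"github.com/sourcenetwork/defradb/tests/bench/fixtures"
)

// printIndexedSDL shows the new functional-option hook in use.
// Assumption: the "user_simple" fixture registers a User struct
// with an Age field, as the benchmarks in this diff suggest.
func printIndexedSDL(ctx context.Context) error {
	// OptionFieldDirective records the directive in the generator's
	// type -> field -> directives map.
	g := fixtures.ForSchema(ctx, "user_simple",
		fixtures.OptionFieldDirective("User", "Age", "@index"))

	// ExtractGQLFromType is now a method so it can consult that map;
	// the emitted SDL should include a line like "Age: Int @index".
	sdl, err := g.ExtractGQLFromType(g.Types()[0])
	if err != nil {
		return err
	}
	fmt.Println(sdl)
	return nil
}
```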
+ +package query + +import ( + "context" + "testing" + + "github.com/sourcenetwork/defradb/tests/bench/fixtures" + query "github.com/sourcenetwork/defradb/tests/bench/query/simple" +) + +var ( + userSimpleWithFilterQuery = ` + query { + User(filter: { Age: { _eq: 30 } }) { + _key + Name + Age + Points + Verified + } + } + ` +) + +func makeUserAgeIndexOption() fixtures.Option { + return fixtures.OptionFieldDirective("User", "Age", "@index") +} + +func Benchmark_Index_UserSimple_QueryWithFilterOnIndex_Sync_1(b *testing.B) { + ctx := context.Background() + err := query.RunQueryBenchGet( + b, + ctx, + fixtures.ForSchema(ctx, "user_simple", makeUserAgeIndexOption()), + 1, + userSimpleWithFilterQuery, + false, + ) + if err != nil { + b.Fatal(err) + } +} + +func Benchmark_Index_UserSimple_QueryWithFilterOnIndex_Sync_10(b *testing.B) { + ctx := context.Background() + err := query.RunQueryBenchGet( + b, + ctx, + fixtures.ForSchema(ctx, "user_simple", makeUserAgeIndexOption()), + 10, + userSimpleWithFilterQuery, + false, + ) + if err != nil { + b.Fatal(err) + } +} + +func Benchmark_Index_UserSimple_QueryWithFilterOnIndex_Sync_1000(b *testing.B) { + ctx := context.Background() + err := query.RunQueryBenchGet( + b, + ctx, + fixtures.ForSchema(ctx, "user_simple", makeUserAgeIndexOption()), + 1000, + userSimpleWithFilterQuery, + false, + ) + if err != nil { + b.Fatal(err) + } +} + +func Benchmark_Index_UserSimple_QueryWithFilterOnIndex_Sync_10000(b *testing.B) { + ctx := context.Background() + err := query.RunQueryBenchGet( + b, + ctx, + fixtures.ForSchema(ctx, "user_simple", makeUserAgeIndexOption()), + 10000, + userSimpleWithFilterQuery, + false, + ) + if err != nil { + b.Fatal(err) + } +} diff --git a/tests/bench/query/planner/utils.go b/tests/bench/query/planner/utils.go index 2f70245b23..fdd504175a 100644 --- a/tests/bench/query/planner/utils.go +++ b/tests/bench/query/planner/utils.go @@ -59,7 +59,7 @@ func runMakePlanBench( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() parser, err := buildParser(ctx, fixture) if err != nil { @@ -127,6 +127,7 @@ type dummyTxn struct{} func (*dummyTxn) Rootstore() datastore.DSReaderWriter { return nil } func (*dummyTxn) Datastore() datastore.DSReaderWriter { return nil } func (*dummyTxn) Headstore() datastore.DSReaderWriter { return nil } +func (*dummyTxn) Peerstore() datastore.DSBatching { return nil } func (*dummyTxn) DAGstore() datastore.DAGStore { return nil } func (*dummyTxn) Systemstore() datastore.DSReaderWriter { return nil } func (*dummyTxn) Commit(ctx context.Context) error { return nil } diff --git a/tests/bench/query/simple/simple_test.go b/tests/bench/query/simple/simple_test.go index ca0d627275..a9791bcbc7 100644 --- a/tests/bench/query/simple/simple_test.go +++ b/tests/bench/query/simple/simple_test.go @@ -33,7 +33,7 @@ var ( func Benchmark_Query_UserSimple_Query_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -48,7 +48,7 @@ func Benchmark_Query_UserSimple_Query_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -63,7 +63,7 @@ func Benchmark_Query_UserSimple_Query_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, 
fixtures.ForSchema(ctx, "user_simple"), @@ -78,7 +78,7 @@ func Benchmark_Query_UserSimple_Query_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/utils.go b/tests/bench/query/simple/utils.go index 32e6525dc7..8c6f82579b 100644 --- a/tests/bench/query/simple/utils.go +++ b/tests/bench/query/simple/utils.go @@ -27,7 +27,7 @@ var ( // log = logging.MustNewLogger("bench") ) -func runQueryBenchGet( +func RunQueryBenchGet( b *testing.B, ctx context.Context, fixture fixtures.Generator, @@ -39,7 +39,7 @@ func runQueryBenchGet( if err != nil { return err } - defer db.Close(ctx) + defer db.Close() dockeys, err := benchutils.BackfillBenchmarkDB( b, diff --git a/tests/bench/query/simple/with_filter_test.go b/tests/bench/query/simple/with_filter_test.go index 86323e2beb..60081167a3 100644 --- a/tests/bench/query/simple/with_filter_test.go +++ b/tests/bench/query/simple/with_filter_test.go @@ -33,7 +33,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithFilter_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -48,7 +48,7 @@ func Benchmark_Query_UserSimple_Query_WithFilter_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithFilter_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -63,7 +63,7 @@ func Benchmark_Query_UserSimple_Query_WithFilter_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithFilter_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -78,7 +78,7 @@ func Benchmark_Query_UserSimple_Query_WithFilter_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithFilter_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/with_limit_offset_test.go b/tests/bench/query/simple/with_limit_offset_test.go index 97dc523455..e47d8f347e 100644 --- a/tests/bench/query/simple/with_limit_offset_test.go +++ b/tests/bench/query/simple/with_limit_offset_test.go @@ -33,7 +33,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -48,7 +48,7 @@ func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -63,7 +63,7 @@ func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -78,7 +78,7 @@ func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithLimitOffset_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := 
RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/with_multi_lookup_test.go b/tests/bench/query/simple/with_multi_lookup_test.go index 6af7b6e20a..2c744319a3 100644 --- a/tests/bench/query/simple/with_multi_lookup_test.go +++ b/tests/bench/query/simple/with_multi_lookup_test.go @@ -34,7 +34,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -49,7 +49,7 @@ func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -64,7 +64,7 @@ func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithMultiLookup_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/with_order_test.go b/tests/bench/query/simple/with_order_test.go index ec1a757f2b..2b12817713 100644 --- a/tests/bench/query/simple/with_order_test.go +++ b/tests/bench/query/simple/with_order_test.go @@ -33,7 +33,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithSort_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -48,7 +48,7 @@ func Benchmark_Query_UserSimple_Query_WithSort_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSort_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -63,7 +63,7 @@ func Benchmark_Query_UserSimple_Query_WithSort_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSort_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -78,7 +78,7 @@ func Benchmark_Query_UserSimple_Query_WithSort_Sync_100(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSort_Sync_1000(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), diff --git a/tests/bench/query/simple/with_single_lookup_test.go b/tests/bench/query/simple/with_single_lookup_test.go index a2fb7e3b59..d432f730be 100644 --- a/tests/bench/query/simple/with_single_lookup_test.go +++ b/tests/bench/query/simple/with_single_lookup_test.go @@ -34,7 +34,7 @@ var ( func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_1(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -49,7 +49,7 @@ func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_1(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_10(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + err := RunQueryBenchGet( b, ctx, fixtures.ForSchema(ctx, "user_simple"), @@ -64,7 +64,7 @@ func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_10(b *testing.B) { func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_100(b *testing.B) { ctx := context.Background() - err := runQueryBenchGet( + 
err := RunQueryBenchGet(
 		b,
 		ctx,
 		fixtures.ForSchema(ctx, "user_simple"),
@@ -79,7 +79,7 @@ func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_100(b *testing.B) {
 func Benchmark_Query_UserSimple_Query_WithSingleLookup_Sync_1000(b *testing.B) {
 	ctx := context.Background()
-	err := runQueryBenchGet(
+	err := RunQueryBenchGet(
 		b,
 		ctx,
 		fixtures.ForSchema(ctx, "user_simple"),
diff --git a/tests/change_detector/README.md b/tests/change_detector/README.md
new file mode 100644
index 0000000000..4d824fb60f
--- /dev/null
+++ b/tests/change_detector/README.md
@@ -0,0 +1,15 @@
+# Change Detector
+
+The change detector detects data format changes between versions of DefraDB.
+
+## How it works
+
+The tests run against a `source` and a `target` branch of DefraDB. Each branch is cloned into a temporary directory and its dependencies are installed.
+
+The test runner then executes all of the test packages common to the `source` and `target` tests directories.
+
+For each test package execution, the following steps occur:
+
+- Create a temporary data directory. This is used to share data between `source` and `target`.
+- Run the `source` version in setup-only mode. This creates test fixtures in the shared data directory.
+- Run the `target` version in change detector mode. This skips the setup and executes the tests against the shared data directory.
diff --git a/tests/change_detector/change_detector_test.go b/tests/change_detector/change_detector_test.go
new file mode 100644
index 0000000000..519bc7d965
--- /dev/null
+++ b/tests/change_detector/change_detector_test.go
@@ -0,0 +1,206 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
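Before the test file body, it is worth noting how the suite described in the README above is invoked: the file is gated behind the `change_detector` build tag, and configuration flows through the `DEFRA_CHANGE_DETECTOR_*` environment variables defined in `tests/change_detector/utils.go` further below. A minimal invocation sketch (a sketch only; left unset, the source branch defaults to `develop` and the target to the local checkout):

    DEFRA_CHANGE_DETECTOR_SOURCE_BRANCH=develop \
    go test -v -tags change_detector ./tests/change_detector/

Setting `DEFRA_CHANGE_DETECTOR_TARGET_BRANCH` to a branch name compares two clones instead of the working tree.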
+
+//go:build change_detector
+
+package change_detector
+
+import (
+	"fmt"
+	"io/fs"
+	"os"
+	"os/exec"
+	"path"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestChanges(t *testing.T) {
+	sourceRepoDir := t.TempDir()
+	execClone(t, sourceRepoDir, Repository, SourceBranch)
+
+	var targetRepoDir string
+	if TargetBranch == "" {
+		// default to the local branch
+		out, err := exec.Command("git", "rev-parse", "--show-toplevel").Output()
+		require.NoError(t, err, string(out))
+		targetRepoDir = strings.TrimSpace(string(out))
+	} else {
+		// check out the target branch
+		targetRepoDir = t.TempDir()
+		execClone(t, targetRepoDir, Repository, TargetBranch)
+	}
+
+	if checkIfDatabaseFormatChangesAreDocumented(t, sourceRepoDir, targetRepoDir) {
+		t.Skip("skipping test with documented database format changes")
+	}
+
+	execMakeDeps(t, sourceRepoDir)
+	execMakeDeps(t, targetRepoDir)
+
+	targetRepoTestDir := filepath.Join(targetRepoDir, "tests", "integration")
+	targetRepoPkgList := execList(t, targetRepoTestDir)
+
+	sourceRepoTestDir := filepath.Join(sourceRepoDir, "tests", "integration")
+	sourceRepoPkgList := execList(t, sourceRepoTestDir)
+
+	sourceRepoPkgMap := make(map[string]bool)
+	for _, pkg := range sourceRepoPkgList {
+		sourceRepoPkgMap[pkg] = true
+	}
+
+	for _, pkg := range targetRepoPkgList {
+		pkgName := strings.TrimPrefix(pkg, "github.com/sourcenetwork/defradb/")
+		t.Run(pkgName, func(t *testing.T) {
+			if pkg == "" || !sourceRepoPkgMap[pkg] {
+				t.Skip("skipping unknown or new test package")
+			}
+
+			t.Parallel()
+			dataDir := t.TempDir()
+
+			sourceTestPkg := filepath.Join(sourceRepoDir, pkgName)
+			execTest(t, sourceTestPkg, dataDir, true)
+
+			targetTestPkg := filepath.Join(targetRepoDir, pkgName)
+			execTest(t, targetTestPkg, dataDir, false)
+		})
+	}
+}
+
+// execList returns a list of all packages in the given directory.
+func execList(t *testing.T, dir string) []string {
+	cmd := exec.Command("go", "list", "./...")
+	cmd.Dir = dir
+
+	out, err := cmd.Output()
+	require.NoError(t, err, string(out))
+
+	return strings.Split(string(out), "\n")
+}
+
+// execTest runs the tests in the given directory, setting the data
+// directory and setup-only environment variables.
+func execTest(t *testing.T, dir, dataDir string, setupOnly bool) {
+	cmd := exec.Command("go", "test", ".", "-count", "1", "-v")
+	cmd.Dir = dir
+	cmd.Env = append(
+		os.Environ(),
+		fmt.Sprintf("%s=%s", enableEnvName, "true"),
+		fmt.Sprintf("%s=%s", rootDataDirEnvName, dataDir),
+	)
+
+	if setupOnly {
+		cmd.Env = append(cmd.Env, fmt.Sprintf("%s=%s", setupOnlyEnvName, "true"))
+	}
+
+	out, err := cmd.Output()
+	require.NoError(t, err, string(out))
+}
+
+// execClone clones the repo from the given URL and branch into the directory.
+func execClone(t *testing.T, dir, url, branch string) {
+	cmd := exec.Command(
+		"git",
+		"clone",
+		"--single-branch",
+		"--branch", branch,
+		"--depth", "1",
+		url,
+		dir,
+	)
+
+	out, err := cmd.Output()
+	require.NoError(t, err, string(out))
+}
+
+// execMakeDeps runs `make deps:lens` in the given directory.
+func execMakeDeps(t *testing.T, dir string) { + cmd := exec.Command("make", "deps:lens") + cmd.Dir = dir + + out, err := cmd.Output() + require.NoError(t, err, string(out)) +} + +func checkIfDatabaseFormatChangesAreDocumented(t *testing.T, sourceDir, targetDir string) bool { + sourceChanges, ok := getDatabaseFormatDocumentation(t, sourceDir, false) + require.True(t, ok, "Documentation directory not found") + + changes := make(map[string]struct{}, len(sourceChanges)) + for _, f := range sourceChanges { + // Note: we assume flat directory for now - sub directories are not expanded + changes[f.Name()] = struct{}{} + } + + targetChanges, ok := getDatabaseFormatDocumentation(t, targetDir, true) + require.True(t, ok, "Documentation directory not found") + + for _, f := range targetChanges { + if _, isChangeOld := changes[f.Name()]; !isChangeOld { + // If there is a new file in the directory then the change + // has been documented and the test should pass + return true + } + } + + return false +} + +func getDatabaseFormatDocumentation(t *testing.T, startPath string, allowDescend bool) ([]fs.DirEntry, bool) { + startInfo, err := os.Stat(startPath) + require.NoError(t, err) + + var currentDirectory string + if startInfo.IsDir() { + currentDirectory = startPath + } else { + currentDirectory = path.Dir(startPath) + } + + for { + directoryContents, err := os.ReadDir(currentDirectory) + require.NoError(t, err) + + for _, directoryItem := range directoryContents { + directoryItemPath := path.Join(currentDirectory, directoryItem.Name()) + if directoryItem.Name() == documentationDirectoryName { + probableFormatChangeDirectoryContents, err := os.ReadDir(directoryItemPath) + require.NoError(t, err) + + for _, possibleDocumentationItem := range probableFormatChangeDirectoryContents { + if path.Ext(possibleDocumentationItem.Name()) == ".md" { + // If the directory's name matches the expected, and contains .md files + // we assume it is the documentation directory + return probableFormatChangeDirectoryContents, true + } + } + } else { + if directoryItem.IsDir() { + childContents, directoryFound := getDatabaseFormatDocumentation(t, directoryItemPath, false) + if directoryFound { + return childContents, true + } + } + } + } + + if allowDescend { + // If not found in this directory, continue down the path + currentDirectory = path.Dir(currentDirectory) + require.True(t, currentDirectory != "." && currentDirectory != "/") + } else { + return []fs.DirEntry{}, false + } + } +} diff --git a/tests/change_detector/utils.go b/tests/change_detector/utils.go new file mode 100644 index 0000000000..4e6e938aa5 --- /dev/null +++ b/tests/change_detector/utils.go @@ -0,0 +1,102 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package change_detector + +import ( + "os" + "path" + "strconv" + "testing" + + "github.com/stretchr/testify/require" +) + +var ( + // Enabled is true when the change detector is running. + Enabled bool + // SetupOnly is true when the change detector is running in setup mode. + SetupOnly bool + // Repository is the url of the repository to run change detector on. + Repository string + // SourceBranch is the name of the source branch to run change detector on. 
+ SourceBranch string + // TargetBranch is the name of the target branch to run change detector on. + TargetBranch string + // rootDatabaseDir is the shared database directory for running tests. + rootDatabaseDir string + // previousTestCaseTestName is the name of the previous test. + previousTestCaseTestName string +) + +const ( + repositoryEnvName = "DEFRA_CHANGE_DETECTOR_REPOSITORY" + sourceBranchEnvName = "DEFRA_CHANGE_DETECTOR_SOURCE_BRANCH" + targetBranchEnvName = "DEFRA_CHANGE_DETECTOR_TARGET_BRANCH" + setupOnlyEnvName = "DEFRA_CHANGE_DETECTOR_SETUP_ONLY" + rootDataDirEnvName = "DEFRA_CHANGE_DETECTOR_ROOT_DATA_DIR" + enableEnvName = "DEFRA_CHANGE_DETECTOR_ENABLE" +) + +const ( + defaultRepository = "https://github.com/sourcenetwork/defradb.git" + defaultSourceBranch = "develop" + documentationDirectoryName = "data_format_changes" +) + +func init() { + Enabled, _ = strconv.ParseBool(os.Getenv(enableEnvName)) + SetupOnly, _ = strconv.ParseBool(os.Getenv(setupOnlyEnvName)) + TargetBranch = os.Getenv(targetBranchEnvName) + rootDatabaseDir = os.Getenv(rootDataDirEnvName) + + if value, ok := os.LookupEnv(repositoryEnvName); ok { + Repository = value + } else { + Repository = defaultRepository + } + + if value, ok := os.LookupEnv(sourceBranchEnvName); ok { + SourceBranch = value + } else { + SourceBranch = defaultSourceBranch + } +} + +// DatabaseDir returns the database directory for change detector test. +func DatabaseDir(t testing.TB) string { + return path.Join(rootDatabaseDir, t.Name()) +} + +// PreTestChecks skips any test that can't be run by the change detector. +func PreTestChecks(t *testing.T, collectionNames []string) { + if !Enabled { + return + } + + if previousTestCaseTestName == t.Name() { + t.Skip("skipping duplicate test") + } + previousTestCaseTestName = t.Name() + + if len(collectionNames) == 0 { + t.Skip("skipping test with no collections") + } + + if SetupOnly { + return + } + + _, err := os.Stat(DatabaseDir(t)) + if os.IsNotExist(err) { + t.Skip("skipping new test package") + } + require.NoError(t, err) +} diff --git a/tests/clients/cli/wrapper.go b/tests/clients/cli/wrapper.go new file mode 100644 index 0000000000..43c0aba820 --- /dev/null +++ b/tests/clients/cli/wrapper.go @@ -0,0 +1,519 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
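To make the intended wiring of `utils.go` concrete, a sketch of how an integration test would consume these helpers (the test name and collection list are hypothetical; the import path follows the file layout above):

    package tests

    import (
    	"testing"

    	changeDetector "github.com/sourcenetwork/defradb/tests/change_detector"
    )

    func TestUserSimple(t *testing.T) {
    	// Skips duplicate runs, collection-less tests, and packages that are
    	// new relative to the target branch; a no-op when the detector is off.
    	changeDetector.PreTestChecks(t, []string{"User"})

    	// Both the source and target processes resolve the same per-test
    	// directory, which is how state is shared between versions.
    	dbDir := changeDetector.DatabaseDir(t)
    	_ = dbDir // a real test would open its datastore here
    }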
+ +package cli + +import ( + "bufio" + "context" + "encoding/json" + "fmt" + "io" + "net/http/httptest" + "strings" + + blockstore "github.com/ipfs/boxo/blockstore" + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/sourcenetwork/defradb/cli" + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/net" +) + +var _ client.P2P = (*Wrapper)(nil) + +type Wrapper struct { + node *net.Node + cmd *cliWrapper + handler *http.Handler + httpServer *httptest.Server +} + +func NewWrapper(node *net.Node) (*Wrapper, error) { + handler, err := http.NewHandler(node, http.ServerOptions{}) + if err != nil { + return nil, err + } + + httpServer := httptest.NewServer(handler) + cmd := newCliWrapper(httpServer.URL) + + return &Wrapper{ + node: node, + cmd: cmd, + httpServer: httpServer, + handler: handler, + }, nil +} + +func (w *Wrapper) PeerInfo() peer.AddrInfo { + args := []string{"client", "p2p", "info"} + + data, err := w.cmd.execute(context.Background(), args) + if err != nil { + panic(fmt.Sprintf("failed to get peer info: %v", err)) + } + var info peer.AddrInfo + if err := json.Unmarshal(data, &info); err != nil { + panic(fmt.Sprintf("failed to get peer info: %v", err)) + } + return info +} + +func (w *Wrapper) SetReplicator(ctx context.Context, rep client.Replicator) error { + args := []string{"client", "p2p", "replicator", "set"} + args = append(args, "--collection", strings.Join(rep.Schemas, ",")) + + info, err := json.Marshal(rep.Info) + if err != nil { + return err + } + args = append(args, string(info)) + + _, err = w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) DeleteReplicator(ctx context.Context, rep client.Replicator) error { + args := []string{"client", "p2p", "replicator", "delete"} + args = append(args, "--collection", strings.Join(rep.Schemas, ",")) + + info, err := json.Marshal(rep.Info) + if err != nil { + return err + } + args = append(args, string(info)) + + _, err = w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) GetAllReplicators(ctx context.Context) ([]client.Replicator, error) { + args := []string{"client", "p2p", "replicator", "getall"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var reps []client.Replicator + if err := json.Unmarshal(data, &reps); err != nil { + return nil, err + } + return reps, nil +} + +func (w *Wrapper) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + args := []string{"client", "p2p", "collection", "add"} + args = append(args, strings.Join(collectionIDs, ",")) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + args := []string{"client", "p2p", "collection", "remove"} + args = append(args, strings.Join(collectionIDs, ",")) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) GetAllP2PCollections(ctx context.Context) ([]string, error) { + args := []string{"client", "p2p", "collection", "getall"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var cols []string + if err := json.Unmarshal(data, &cols); err != nil { + return nil, err + } + return cols, nil +} + +func (w *Wrapper) BasicImport(ctx context.Context, filepath string) error { + args := []string{"client", "backup", "import"} + args = append(args, filepath) + + _, err := w.cmd.execute(ctx, args) 
+ return err +} + +func (w *Wrapper) BasicExport(ctx context.Context, config *client.BackupConfig) error { + args := []string{"client", "backup", "export"} + + if len(config.Collections) > 0 { + args = append(args, "--collections", strings.Join(config.Collections, ",")) + } + if config.Format != "" { + args = append(args, "--format", config.Format) + } + if config.Pretty { + args = append(args, "--pretty") + } + args = append(args, config.Filepath) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) AddSchema(ctx context.Context, schema string) ([]client.CollectionDescription, error) { + args := []string{"client", "schema", "add"} + args = append(args, schema) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var cols []client.CollectionDescription + if err := json.Unmarshal(data, &cols); err != nil { + return nil, err + } + return cols, nil +} + +func (w *Wrapper) PatchSchema(ctx context.Context, patch string, setDefault bool) error { + args := []string{"client", "schema", "patch"} + if setDefault { + args = append(args, "--set-default") + } + args = append(args, patch) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { + args := []string{"client", "schema", "set-default"} + args = append(args, schemaVersionID) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Wrapper) SetMigration(ctx context.Context, config client.LensConfig) error { + return w.LensRegistry().SetMigration(ctx, config) +} + +func (w *Wrapper) LensRegistry() client.LensRegistry { + return &LensRegistry{w.cmd} +} + +func (w *Wrapper) GetCollectionByName(ctx context.Context, name client.CollectionName) (client.Collection, error) { + args := []string{"client", "collection", "describe"} + args = append(args, "--name", name) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var definition client.CollectionDefinition + if err := json.Unmarshal(data, &definition); err != nil { + return nil, err + } + return &Collection{w.cmd, definition}, nil +} + +func (w *Wrapper) GetCollectionsBySchemaRoot(ctx context.Context, schemaRoot string) ([]client.Collection, error) { + args := []string{"client", "collection", "describe"} + args = append(args, "--schema", schemaRoot) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var colDesc []client.CollectionDefinition + if err := json.Unmarshal(data, &colDesc); err != nil { + return nil, err + } + cols := make([]client.Collection, len(colDesc)) + for i, v := range colDesc { + cols[i] = &Collection{w.cmd, v} + } + return cols, err +} + +func (w *Wrapper) GetCollectionsByVersionID(ctx context.Context, versionId string) ([]client.Collection, error) { + args := []string{"client", "collection", "describe"} + args = append(args, "--version", versionId) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var colDesc []client.CollectionDefinition + if err := json.Unmarshal(data, &colDesc); err != nil { + return nil, err + } + cols := make([]client.Collection, len(colDesc)) + for i, v := range colDesc { + cols[i] = &Collection{w.cmd, v} + } + return cols, err +} + +func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, error) { + args := []string{"client", "collection", "describe"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var colDesc []client.CollectionDefinition + 
if err := json.Unmarshal(data, &colDesc); err != nil { + return nil, err + } + cols := make([]client.Collection, len(colDesc)) + for i, v := range colDesc { + cols[i] = &Collection{w.cmd, v} + } + return cols, err +} + +func (w *Wrapper) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) { + args := []string{"client", "schema", "describe"} + args = append(args, "--name", name) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var schema []client.SchemaDescription + if err := json.Unmarshal(data, &schema); err != nil { + return nil, err + } + return schema, err +} + +func (w *Wrapper) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) { + args := []string{"client", "schema", "describe"} + args = append(args, "--version", versionID) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return client.SchemaDescription{}, err + } + var schema client.SchemaDescription + if err := json.Unmarshal(data, &schema); err != nil { + return client.SchemaDescription{}, err + } + return schema, err +} + +func (w *Wrapper) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) { + args := []string{"client", "schema", "describe"} + args = append(args, "--root", root) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var schema []client.SchemaDescription + if err := json.Unmarshal(data, &schema); err != nil { + return nil, err + } + return schema, err +} + +func (w *Wrapper) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) { + args := []string{"client", "schema", "describe"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var schema []client.SchemaDescription + if err := json.Unmarshal(data, &schema); err != nil { + return nil, err + } + return schema, err +} + +func (w *Wrapper) GetAllIndexes(ctx context.Context) (map[client.CollectionName][]client.IndexDescription, error) { + args := []string{"client", "index", "list"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var indexes map[client.CollectionName][]client.IndexDescription + if err := json.Unmarshal(data, &indexes); err != nil { + return nil, err + } + return indexes, nil +} + +func (w *Wrapper) ExecRequest(ctx context.Context, query string) *client.RequestResult { + args := []string{"client", "query"} + args = append(args, query) + + result := &client.RequestResult{} + + stdOut, stdErr, err := w.cmd.executeStream(ctx, args) + if err != nil { + result.GQL.Errors = []error{err} + return result + } + buffer := bufio.NewReader(stdOut) + header, err := buffer.ReadString('\n') + if err != nil { + result.GQL.Errors = []error{err} + return result + } + if header == cli.SUB_RESULTS_HEADER { + result.Pub = w.execRequestSubscription(ctx, buffer) + return result + } + data, err := io.ReadAll(buffer) + if err != nil { + result.GQL.Errors = []error{err} + return result + } + errData, err := io.ReadAll(stdErr) + if err != nil { + result.GQL.Errors = []error{err} + return result + } + if len(errData) > 0 { + result.GQL.Errors = []error{fmt.Errorf("%s", errData)} + return result + } + + var response http.GraphQLResponse + if err = json.Unmarshal(data, &response); err != nil { + result.GQL.Errors = []error{err} + return result + } + result.GQL.Data = response.Data + result.GQL.Errors = response.Errors + return result +} + +func (w *Wrapper) execRequestSubscription(ctx context.Context, r 
io.Reader) *events.Publisher[events.Update] { + pubCh := events.New[events.Update](0, 0) + pub, err := events.NewPublisher[events.Update](pubCh, 0) + if err != nil { + return nil + } + + go func() { + dec := json.NewDecoder(r) + + for { + var response http.GraphQLResponse + if err := dec.Decode(&response); err != nil { + return + } + pub.Publish(client.GQLResult{ + Errors: response.Errors, + Data: response.Data, + }) + } + }() + + return pub +} + +func (w *Wrapper) NewTxn(ctx context.Context, readOnly bool) (datastore.Txn, error) { + args := []string{"client", "tx", "create"} + if readOnly { + args = append(args, "--read-only") + } + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var res http.CreateTxResponse + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + tx, err := w.handler.Transaction(res.ID) + if err != nil { + return nil, err + } + return &Transaction{tx, w.cmd}, nil +} + +func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastore.Txn, error) { + args := []string{"client", "tx", "create"} + args = append(args, "--concurrent") + + if readOnly { + args = append(args, "--read-only") + } + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var res http.CreateTxResponse + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + tx, err := w.handler.Transaction(res.ID) + if err != nil { + return nil, err + } + return &Transaction{tx, w.cmd}, nil +} + +func (w *Wrapper) WithTxn(tx datastore.Txn) client.Store { + return &Wrapper{ + node: w.node, + cmd: w.cmd.withTxn(tx), + } +} + +func (w *Wrapper) Root() datastore.RootStore { + return w.node.Root() +} + +func (w *Wrapper) Blockstore() blockstore.Blockstore { + return w.node.Blockstore() +} + +func (w *Wrapper) Peerstore() datastore.DSBatching { + return w.node.Peerstore() +} + +func (w *Wrapper) Close() { + w.httpServer.CloseClientConnections() + w.httpServer.Close() + w.node.Close() +} + +func (w *Wrapper) Events() events.Events { + return w.node.Events() +} + +func (w *Wrapper) MaxTxnRetries() int { + return w.node.MaxTxnRetries() +} + +func (w *Wrapper) PrintDump(ctx context.Context) error { + return w.node.PrintDump(ctx) +} + +func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) { + w.node.Bootstrap(addrs) +} + +func (w *Wrapper) WaitForPushLogByPeerEvent(id peer.ID) error { + return w.node.WaitForPushLogByPeerEvent(id) +} + +func (w *Wrapper) WaitForPushLogFromPeerEvent(id peer.ID) error { + return w.node.WaitForPushLogFromPeerEvent(id) +} diff --git a/tests/clients/cli/wrapper_cli.go b/tests/clients/cli/wrapper_cli.go new file mode 100644 index 0000000000..1f73b20e25 --- /dev/null +++ b/tests/clients/cli/wrapper_cli.go @@ -0,0 +1,85 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
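Taken together, the wrapper above is meant to be used like a regular `client.P2P` while routing every call through the CLI. A rough usage sketch (assuming `node` is an already-configured `*net.Node`; the helper name is hypothetical and import paths follow the file paths above):

    package clients_test

    import (
    	"context"
    	"testing"

    	"github.com/sourcenetwork/defradb/net"
    	"github.com/sourcenetwork/defradb/tests/clients/cli"
    )

    func useCLIWrapper(t *testing.T, node *net.Node) {
    	w, err := cli.NewWrapper(node)
    	if err != nil {
    		t.Fatal(err)
    	}
    	defer w.Close()

    	ctx := context.Background()
    	tx, err := w.NewTxn(ctx, false)
    	if err != nil {
    		t.Fatal(err)
    	}
    	defer tx.Discard(ctx)

    	// Store calls made through this handle are routed over the CLI
    	// and share the transaction created above.
    	if _, err := w.WithTxn(tx).GetAllCollections(ctx); err != nil {
    		t.Fatal(err)
    	}
    }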
+ +package cli + +import ( + "context" + "fmt" + "io" + "strings" + + "github.com/sourcenetwork/defradb/cli" + "github.com/sourcenetwork/defradb/config" + "github.com/sourcenetwork/defradb/datastore" +) + +type cliWrapper struct { + address string + txValue string +} + +func newCliWrapper(address string) *cliWrapper { + return &cliWrapper{ + address: strings.TrimPrefix(address, "http://"), + } +} + +func (w *cliWrapper) withTxn(tx datastore.Txn) *cliWrapper { + return &cliWrapper{ + address: w.address, + txValue: fmt.Sprintf("%d", tx.ID()), + } +} + +func (w *cliWrapper) execute(ctx context.Context, args []string) ([]byte, error) { + stdOut, stdErr, err := w.executeStream(ctx, args) + if err != nil { + return nil, err + } + stdOutData, err := io.ReadAll(stdOut) + if err != nil { + return nil, err + } + stdErrData, err := io.ReadAll(stdErr) + if err != nil { + return nil, err + } + if len(stdErrData) != 0 { + return nil, fmt.Errorf("%s", stdErrData) + } + return stdOutData, nil +} + +func (w *cliWrapper) executeStream(ctx context.Context, args []string) (io.ReadCloser, io.ReadCloser, error) { + stdOutRead, stdOutWrite := io.Pipe() + stdErrRead, stdErrWrite := io.Pipe() + + if w.txValue != "" { + args = append(args, "--tx", w.txValue) + } + args = append(args, "--url", w.address) + + cmd := cli.NewDefraCommand(config.DefaultConfig()) + cmd.SetOut(stdOutWrite) + cmd.SetErr(stdErrWrite) + cmd.SetArgs(args) + + cmd.SilenceErrors = true + cmd.SilenceUsage = true + + go func() { + err := cmd.Execute() + stdOutWrite.CloseWithError(err) + stdErrWrite.CloseWithError(err) + }() + + return stdOutRead, stdErrRead, nil +} diff --git a/tests/clients/cli/wrapper_collection.go b/tests/clients/cli/wrapper_collection.go new file mode 100644 index 0000000000..0ce3c92836 --- /dev/null +++ b/tests/clients/cli/wrapper_collection.go @@ -0,0 +1,409 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
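The `cliWrapper` just shown is the core of this client: every method in the files that follow builds an argv slice, lets `executeStream` append the global `--tx` and `--url` flags, and runs the embedded cobra command in-process, surfacing anything written to stderr as an error. A condensed sketch of a one-off call from inside the package (the address is illustrative; it normally comes from `httptest.NewServer`):

    // Roughly equivalent to: defradb client schema describe --url <address>
    func exampleDescribe(ctx context.Context) ([]byte, error) {
    	w := newCliWrapper("http://127.0.0.1:9181")
    	return w.execute(ctx, []string{"client", "schema", "describe"})
    }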
+ +package cli + +import ( + "context" + "encoding/json" + "fmt" + "strings" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" + "github.com/sourcenetwork/defradb/datastore" + "github.com/sourcenetwork/defradb/errors" + "github.com/sourcenetwork/defradb/http" +) + +var _ client.Collection = (*Collection)(nil) + +type Collection struct { + cmd *cliWrapper + def client.CollectionDefinition +} + +func (c *Collection) Description() client.CollectionDescription { + return c.def.Description +} + +func (c *Collection) Name() string { + return c.Description().Name +} + +func (c *Collection) Schema() client.SchemaDescription { + return c.def.Schema +} + +func (c *Collection) ID() uint32 { + return c.Description().ID +} + +func (c *Collection) SchemaRoot() string { + return c.Schema().Root +} + +func (c *Collection) Definition() client.CollectionDefinition { + return c.def +} + +func (c *Collection) Create(ctx context.Context, doc *client.Document) error { + args := []string{"client", "collection", "create"} + args = append(args, "--name", c.Description().Name) + + // We must call this here, else the doc key on the given object will not match + // that of the document saved in the database + err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields) + if err != nil { + return err + } + document, err := doc.String() + if err != nil { + return err + } + args = append(args, string(document)) + + _, err = c.cmd.execute(ctx, args) + if err != nil { + return err + } + doc.Clean() + return nil +} + +func (c *Collection) CreateMany(ctx context.Context, docs []*client.Document) error { + args := []string{"client", "collection", "create"} + args = append(args, "--name", c.Description().Name) + + docMapList := make([]map[string]any, len(docs)) + for i, doc := range docs { + // We must call this here, else the doc key on the given object will not match + // that of the document saved in the database + err := doc.RemapAliasFieldsAndDockey(c.Schema().Fields) + if err != nil { + return err + } + docMap, err := doc.ToMap() + if err != nil { + return err + } + docMapList[i] = docMap + } + documents, err := json.Marshal(docMapList) + if err != nil { + return err + } + args = append(args, string(documents)) + + _, err = c.cmd.execute(ctx, args) + if err != nil { + return err + } + for _, doc := range docs { + doc.Clean() + } + return nil +} + +func (c *Collection) Update(ctx context.Context, doc *client.Document) error { + args := []string{"client", "collection", "update"} + args = append(args, "--name", c.Description().Name) + args = append(args, "--key", doc.Key().String()) + + document, err := doc.ToJSONPatch() + if err != nil { + return err + } + args = append(args, string(document)) + + _, err = c.cmd.execute(ctx, args) + if err != nil { + return err + } + doc.Clean() + return nil +} + +func (c *Collection) Save(ctx context.Context, doc *client.Document) error { + _, err := c.Get(ctx, doc.Key(), true) + if err == nil { + return c.Update(ctx, doc) + } + if errors.Is(err, client.ErrDocumentNotFound) { + return c.Create(ctx, doc) + } + return err +} + +func (c *Collection) Delete(ctx context.Context, docKey client.DocKey) (bool, error) { + res, err := c.DeleteWithKey(ctx, docKey) + if err != nil { + return false, err + } + return res.Count == 1, nil +} + +func (c *Collection) Exists(ctx context.Context, docKey client.DocKey) (bool, error) { + _, err := c.Get(ctx, docKey, false) + if err != nil { + return false, err + } + return true, nil +} + +func (c *Collection) 
UpdateWith(ctx context.Context, target any, updater string) (*client.UpdateResult, error) { + switch t := target.(type) { + case string, map[string]any, *request.Filter: + return c.UpdateWithFilter(ctx, t, updater) + case client.DocKey: + return c.UpdateWithKey(ctx, t, updater) + case []client.DocKey: + return c.UpdateWithKeys(ctx, t, updater) + default: + return nil, client.ErrInvalidUpdateTarget + } +} + +func (c *Collection) updateWith( + ctx context.Context, + args []string, +) (*client.UpdateResult, error) { + data, err := c.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var res client.UpdateResult + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return &res, nil +} + +func (c *Collection) UpdateWithFilter( + ctx context.Context, + filter any, + updater string, +) (*client.UpdateResult, error) { + args := []string{"client", "collection", "update"} + args = append(args, "--name", c.Description().Name) + args = append(args, "--updater", updater) + + filterJSON, err := json.Marshal(filter) + if err != nil { + return nil, err + } + args = append(args, "--filter", string(filterJSON)) + + return c.updateWith(ctx, args) +} + +func (c *Collection) UpdateWithKey( + ctx context.Context, + key client.DocKey, + updater string, +) (*client.UpdateResult, error) { + args := []string{"client", "collection", "update"} + args = append(args, "--name", c.Description().Name) + args = append(args, "--key", key.String()) + args = append(args, "--updater", updater) + + return c.updateWith(ctx, args) +} + +func (c *Collection) UpdateWithKeys( + ctx context.Context, + docKeys []client.DocKey, + updater string, +) (*client.UpdateResult, error) { + args := []string{"client", "collection", "update"} + args = append(args, "--name", c.Description().Name) + args = append(args, "--updater", updater) + + keys := make([]string, len(docKeys)) + for i, v := range docKeys { + keys[i] = v.String() + } + args = append(args, "--key", strings.Join(keys, ",")) + + return c.updateWith(ctx, args) +} + +func (c *Collection) DeleteWith(ctx context.Context, target any) (*client.DeleteResult, error) { + switch t := target.(type) { + case string, map[string]any, *request.Filter: + return c.DeleteWithFilter(ctx, t) + case client.DocKey: + return c.DeleteWithKey(ctx, t) + case []client.DocKey: + return c.DeleteWithKeys(ctx, t) + default: + return nil, client.ErrInvalidDeleteTarget + } +} + +func (c *Collection) deleteWith( + ctx context.Context, + args []string, +) (*client.DeleteResult, error) { + data, err := c.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var res client.DeleteResult + if err := json.Unmarshal(data, &res); err != nil { + return nil, err + } + return &res, nil +} + +func (c *Collection) DeleteWithFilter(ctx context.Context, filter any) (*client.DeleteResult, error) { + args := []string{"client", "collection", "delete"} + args = append(args, "--name", c.Description().Name) + + filterJSON, err := json.Marshal(filter) + if err != nil { + return nil, err + } + args = append(args, "--filter", string(filterJSON)) + + return c.deleteWith(ctx, args) +} + +func (c *Collection) DeleteWithKey(ctx context.Context, docKey client.DocKey) (*client.DeleteResult, error) { + args := []string{"client", "collection", "delete"} + args = append(args, "--name", c.Description().Name) + args = append(args, "--key", docKey.String()) + + return c.deleteWith(ctx, args) +} + +func (c *Collection) DeleteWithKeys(ctx context.Context, docKeys []client.DocKey) (*client.DeleteResult, 
error) {
+	args := []string{"client", "collection", "delete"}
+	args = append(args, "--name", c.Description().Name)
+
+	keys := make([]string, len(docKeys))
+	for i, v := range docKeys {
+		keys[i] = v.String()
+	}
+	args = append(args, "--key", strings.Join(keys, ","))
+
+	return c.deleteWith(ctx, args)
+}
+
+func (c *Collection) Get(ctx context.Context, key client.DocKey, showDeleted bool) (*client.Document, error) {
+	args := []string{"client", "collection", "get"}
+	args = append(args, "--name", c.Description().Name)
+	args = append(args, key.String())
+
+	if showDeleted {
+		args = append(args, "--show-deleted")
+	}
+
+	data, err := c.cmd.execute(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+	var docMap map[string]any
+	if err := json.Unmarshal(data, &docMap); err != nil {
+		return nil, err
+	}
+	return client.NewDocFromMap(docMap)
+}
+
+func (c *Collection) WithTxn(tx datastore.Txn) client.Collection {
+	return &Collection{
+		cmd: c.cmd.withTxn(tx),
+		def: c.def,
+	}
+}
+
+func (c *Collection) GetAllDocKeys(ctx context.Context) (<-chan client.DocKeysResult, error) {
+	args := []string{"client", "collection", "keys"}
+	args = append(args, "--name", c.Description().Name)
+
+	stdOut, _, err := c.cmd.executeStream(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+	docKeyCh := make(chan client.DocKeysResult)
+
+	go func() {
+		dec := json.NewDecoder(stdOut)
+		defer close(docKeyCh)
+
+		for {
+			var res http.DocKeyResult
+			if err := dec.Decode(&res); err != nil {
+				return
+			}
+			key, err := client.NewDocKeyFromString(res.Key)
+			if err != nil {
+				return
+			}
+			docKey := client.DocKeysResult{
+				Key: key,
+			}
+			if res.Error != "" {
+				docKey.Err = fmt.Errorf("%s", res.Error)
+			}
+			docKeyCh <- docKey
+		}
+	}()
+
+	return docKeyCh, nil
+}
+
+func (c *Collection) CreateIndex(
+	ctx context.Context,
+	indexDesc client.IndexDescription,
+) (index client.IndexDescription, err error) {
+	args := []string{"client", "index", "create"}
+	args = append(args, "--collection", c.Description().Name)
+	args = append(args, "--name", indexDesc.Name)
+
+	fields := make([]string, len(indexDesc.Fields))
+	for i := range indexDesc.Fields {
+		fields[i] = indexDesc.Fields[i].Name
+	}
+	args = append(args, "--fields", strings.Join(fields, ","))
+
+	data, err := c.cmd.execute(ctx, args)
+	if err != nil {
+		return index, err
+	}
+	if err := json.Unmarshal(data, &index); err != nil {
+		return index, err
+	}
+	return index, nil
+}
+
+func (c *Collection) DropIndex(ctx context.Context, indexName string) error {
+	args := []string{"client", "index", "drop"}
+	args = append(args, "--collection", c.Description().Name)
+	args = append(args, "--name", indexName)
+
+	_, err := c.cmd.execute(ctx, args)
+	return err
+}
+
+func (c *Collection) GetIndexes(ctx context.Context) ([]client.IndexDescription, error) {
+	args := []string{"client", "index", "list"}
+	args = append(args, "--collection", c.Description().Name)
+
+	data, err := c.cmd.execute(ctx, args)
+	if err != nil {
+		return nil, err
+	}
+	var indexes []client.IndexDescription
+	if err := json.Unmarshal(data, &indexes); err != nil {
+		return nil, err
+	}
+	return indexes, nil
+}
diff --git a/tests/clients/cli/wrapper_lens.go b/tests/clients/cli/wrapper_lens.go
new file mode 100644
index 0000000000..679a792662
--- /dev/null
+++ b/tests/clients/cli/wrapper_lens.go
@@ -0,0 +1,145 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "context" + "encoding/json" + + "github.com/sourcenetwork/immutable/enumerable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/datastore" +) + +var _ client.LensRegistry = (*LensRegistry)(nil) + +type LensRegistry struct { + cmd *cliWrapper +} + +func (w *LensRegistry) WithTxn(tx datastore.Txn) client.LensRegistry { + return &LensRegistry{w.cmd.withTxn(tx)} +} + +func (w *LensRegistry) SetMigration(ctx context.Context, config client.LensConfig) error { + args := []string{"client", "schema", "migration", "set"} + args = append(args, config.SourceSchemaVersionID) + args = append(args, config.DestinationSchemaVersionID) + + lensCfg, err := json.Marshal(config.Lens) + if err != nil { + return err + } + args = append(args, string(lensCfg)) + + _, err = w.cmd.execute(ctx, args) + return err +} + +func (w *LensRegistry) ReloadLenses(ctx context.Context) error { + args := []string{"client", "schema", "migration", "reload"} + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *LensRegistry) MigrateUp( + ctx context.Context, + src enumerable.Enumerable[map[string]any], + schemaVersionID string, +) (enumerable.Enumerable[map[string]any], error) { + args := []string{"client", "schema", "migration", "up"} + args = append(args, "--version", schemaVersionID) + + var srcData []map[string]any + err := enumerable.ForEach(src, func(item map[string]any) { + srcData = append(srcData, item) + }) + if err != nil { + return nil, err + } + srcJSON, err := json.Marshal(srcData) + if err != nil { + return nil, err + } + args = append(args, string(srcJSON)) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var out enumerable.Enumerable[map[string]any] + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return out, nil +} + +func (w *LensRegistry) MigrateDown( + ctx context.Context, + src enumerable.Enumerable[map[string]any], + schemaVersionID string, +) (enumerable.Enumerable[map[string]any], error) { + args := []string{"client", "schema", "migration", "down"} + args = append(args, "--version", schemaVersionID) + + var srcData []map[string]any + err := enumerable.ForEach(src, func(item map[string]any) { + srcData = append(srcData, item) + }) + if err != nil { + return nil, err + } + srcJSON, err := json.Marshal(srcData) + if err != nil { + return nil, err + } + args = append(args, string(srcJSON)) + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var out enumerable.Enumerable[map[string]any] + if err := json.Unmarshal(data, &out); err != nil { + return nil, err + } + return out, nil +} + +func (w *LensRegistry) Config(ctx context.Context) ([]client.LensConfig, error) { + args := []string{"client", "schema", "migration", "get"} + + data, err := w.cmd.execute(ctx, args) + if err != nil { + return nil, err + } + var cfgs []client.LensConfig + if err := json.Unmarshal(data, &cfgs); err != nil { + return nil, err + } + return cfgs, nil +} + +func (w *LensRegistry) HasMigration(ctx context.Context, schemaVersionID string) (bool, error) { + cfgs, err := w.Config(ctx) + if err != nil { + return false, err + } + found := false + for _, cfg := range cfgs { + if cfg.SourceSchemaVersionID == schemaVersionID { + found = true + } + } + 
return found, nil +} diff --git a/tests/clients/cli/wrapper_tx.go b/tests/clients/cli/wrapper_tx.go new file mode 100644 index 0000000000..33bfe43bee --- /dev/null +++ b/tests/clients/cli/wrapper_tx.go @@ -0,0 +1,80 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package cli + +import ( + "context" + "fmt" + + "github.com/sourcenetwork/defradb/datastore" +) + +var _ datastore.Txn = (*Transaction)(nil) + +type Transaction struct { + tx datastore.Txn + cmd *cliWrapper +} + +func (w *Transaction) ID() uint64 { + return w.tx.ID() +} + +func (w *Transaction) Commit(ctx context.Context) error { + args := []string{"client", "tx", "commit"} + args = append(args, fmt.Sprintf("%d", w.tx.ID())) + + _, err := w.cmd.execute(ctx, args) + return err +} + +func (w *Transaction) Discard(ctx context.Context) { + args := []string{"client", "tx", "discard"} + args = append(args, fmt.Sprintf("%d", w.tx.ID())) + + w.cmd.execute(ctx, args) //nolint:errcheck +} + +func (w *Transaction) OnSuccess(fn func()) { + w.tx.OnSuccess(fn) +} + +func (w *Transaction) OnError(fn func()) { + w.tx.OnError(fn) +} + +func (w *Transaction) OnDiscard(fn func()) { + w.tx.OnDiscard(fn) +} + +func (w *Transaction) Rootstore() datastore.DSReaderWriter { + return w.tx.Rootstore() +} + +func (w *Transaction) Datastore() datastore.DSReaderWriter { + return w.tx.Datastore() +} + +func (w *Transaction) Headstore() datastore.DSReaderWriter { + return w.tx.Headstore() +} + +func (w *Transaction) Peerstore() datastore.DSBatching { + return w.tx.Peerstore() +} + +func (w *Transaction) DAGstore() datastore.DAGStore { + return w.tx.DAGstore() +} + +func (w *Transaction) Systemstore() datastore.DSReaderWriter { + return w.tx.Systemstore() +} diff --git a/tests/clients/clients.go b/tests/clients/clients.go new file mode 100644 index 0000000000..10df14212f --- /dev/null +++ b/tests/clients/clients.go @@ -0,0 +1,26 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package clients + +import ( + "github.com/libp2p/go-libp2p/core/peer" + + "github.com/sourcenetwork/defradb/client" +) + +// Client implements the P2P interface along with a few other methods +// required for testing. 
+type Client interface { + client.P2P + Bootstrap([]peer.AddrInfo) + WaitForPushLogByPeerEvent(peer.ID) error + WaitForPushLogFromPeerEvent(peer.ID) error +} diff --git a/http/wrapper.go b/tests/clients/http/wrapper.go similarity index 53% rename from http/wrapper.go rename to tests/clients/http/wrapper.go index 558dc79474..ab7975a525 100644 --- a/http/wrapper.go +++ b/tests/clients/http/wrapper.go @@ -12,44 +12,53 @@ package http import ( "context" - "fmt" "net/http/httptest" blockstore "github.com/ipfs/boxo/blockstore" + "github.com/libp2p/go-libp2p/core/peer" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" "github.com/sourcenetwork/defradb/events" + "github.com/sourcenetwork/defradb/http" + "github.com/sourcenetwork/defradb/net" ) -var _ client.DB = (*Wrapper)(nil) +var _ client.P2P = (*Wrapper)(nil) // Wrapper combines an HTTP client and server into a // single struct that implements the client.DB interface. type Wrapper struct { - db client.DB - server *Server - client *Client + node *net.Node + handler *http.Handler + client *http.Client httpServer *httptest.Server } -func NewWrapper(db client.DB) (*Wrapper, error) { - server := NewServer(db) - httpServer := httptest.NewServer(server) +func NewWrapper(node *net.Node) (*Wrapper, error) { + handler, err := http.NewHandler(node, http.ServerOptions{}) + if err != nil { + return nil, err + } - client, err := NewClient(httpServer.URL) + httpServer := httptest.NewServer(handler) + client, err := http.NewClient(httpServer.URL) if err != nil { return nil, err } return &Wrapper{ - db, - server, + node, + handler, client, httpServer, }, nil } +func (w *Wrapper) PeerInfo() peer.AddrInfo { + return w.client.PeerInfo() +} + func (w *Wrapper) SetReplicator(ctx context.Context, rep client.Replicator) error { return w.client.SetReplicator(ctx, rep) } @@ -62,12 +71,12 @@ func (w *Wrapper) GetAllReplicators(ctx context.Context) ([]client.Replicator, e return w.client.GetAllReplicators(ctx) } -func (w *Wrapper) AddP2PCollection(ctx context.Context, collectionID string) error { - return w.client.AddP2PCollection(ctx, collectionID) +func (w *Wrapper) AddP2PCollections(ctx context.Context, collectionIDs []string) error { + return w.client.AddP2PCollections(ctx, collectionIDs) } -func (w *Wrapper) RemoveP2PCollection(ctx context.Context, collectionID string) error { - return w.client.RemoveP2PCollection(ctx, collectionID) +func (w *Wrapper) RemoveP2PCollections(ctx context.Context, collectionIDs []string) error { + return w.client.RemoveP2PCollections(ctx, collectionIDs) } func (w *Wrapper) GetAllP2PCollections(ctx context.Context) ([]string, error) { @@ -86,8 +95,12 @@ func (w *Wrapper) AddSchema(ctx context.Context, schema string) ([]client.Collec return w.client.AddSchema(ctx, schema) } -func (w *Wrapper) PatchSchema(ctx context.Context, patch string) error { - return w.client.PatchSchema(ctx, patch) +func (w *Wrapper) PatchSchema(ctx context.Context, patch string, setAsDefaultVersion bool) error { + return w.client.PatchSchema(ctx, patch, setAsDefaultVersion) +} + +func (w *Wrapper) SetDefaultSchemaVersion(ctx context.Context, schemaVersionID string) error { + return w.client.SetDefaultSchemaVersion(ctx, schemaVersionID) } func (w *Wrapper) SetMigration(ctx context.Context, config client.LensConfig) error { @@ -102,18 +115,34 @@ func (w *Wrapper) GetCollectionByName(ctx context.Context, name client.Collectio return w.client.GetCollectionByName(ctx, name) } -func (w *Wrapper) GetCollectionBySchemaID(ctx 
context.Context, schemaId string) (client.Collection, error) { - return w.client.GetCollectionBySchemaID(ctx, schemaId) +func (w *Wrapper) GetCollectionsBySchemaRoot(ctx context.Context, schemaRoot string) ([]client.Collection, error) { + return w.client.GetCollectionsBySchemaRoot(ctx, schemaRoot) } -func (w *Wrapper) GetCollectionByVersionID(ctx context.Context, versionId string) (client.Collection, error) { - return w.client.GetCollectionByVersionID(ctx, versionId) +func (w *Wrapper) GetCollectionsByVersionID(ctx context.Context, versionId string) ([]client.Collection, error) { + return w.client.GetCollectionsByVersionID(ctx, versionId) } func (w *Wrapper) GetAllCollections(ctx context.Context) ([]client.Collection, error) { return w.client.GetAllCollections(ctx) } +func (w *Wrapper) GetSchemasByName(ctx context.Context, name string) ([]client.SchemaDescription, error) { + return w.client.GetSchemasByName(ctx, name) +} + +func (w *Wrapper) GetSchemaByVersionID(ctx context.Context, versionID string) (client.SchemaDescription, error) { + return w.client.GetSchemaByVersionID(ctx, versionID) +} + +func (w *Wrapper) GetSchemasByRoot(ctx context.Context, root string) ([]client.SchemaDescription, error) { + return w.client.GetSchemasByRoot(ctx, root) +} + +func (w *Wrapper) GetAllSchemas(ctx context.Context) ([]client.SchemaDescription, error) { + return w.client.GetAllSchemas(ctx) +} + func (w *Wrapper) GetAllIndexes(ctx context.Context) (map[client.CollectionName][]client.IndexDescription, error) { return w.client.GetAllIndexes(ctx) } @@ -127,11 +156,11 @@ func (w *Wrapper) NewTxn(ctx context.Context, readOnly bool) (datastore.Txn, err if err != nil { return nil, err } - server, ok := w.server.txs.Load(client.ID()) - if !ok { - return nil, fmt.Errorf("failed to get server transaction") + server, err := w.handler.Transaction(client.ID()) + if err != nil { + return nil, err } - return &TxWrapper{server.(datastore.Txn), client}, nil + return &TxWrapper{server, client}, nil } func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastore.Txn, error) { @@ -139,11 +168,11 @@ func (w *Wrapper) NewConcurrentTxn(ctx context.Context, readOnly bool) (datastor if err != nil { return nil, err } - server, ok := w.server.txs.Load(client.ID()) - if !ok { - return nil, fmt.Errorf("failed to get server transaction") + server, err := w.handler.Transaction(client.ID()) + if err != nil { + return nil, err } - return &TxWrapper{server.(datastore.Txn), client}, nil + return &TxWrapper{server, client}, nil } func (w *Wrapper) WithTxn(tx datastore.Txn) client.Store { @@ -151,27 +180,43 @@ func (w *Wrapper) WithTxn(tx datastore.Txn) client.Store { } func (w *Wrapper) Root() datastore.RootStore { - return w.db.Root() + return w.node.Root() } func (w *Wrapper) Blockstore() blockstore.Blockstore { - return w.db.Blockstore() + return w.node.Blockstore() +} + +func (w *Wrapper) Peerstore() datastore.DSBatching { + return w.node.Peerstore() } -func (w *Wrapper) Close(ctx context.Context) { +func (w *Wrapper) Close() { w.httpServer.CloseClientConnections() w.httpServer.Close() - w.db.Close(ctx) + w.node.Close() } func (w *Wrapper) Events() events.Events { - return w.db.Events() + return w.node.Events() } func (w *Wrapper) MaxTxnRetries() int { - return w.db.MaxTxnRetries() + return w.node.MaxTxnRetries() } func (w *Wrapper) PrintDump(ctx context.Context) error { - return w.db.PrintDump(ctx) + return w.node.PrintDump(ctx) +} + +func (w *Wrapper) Bootstrap(addrs []peer.AddrInfo) { + w.node.Bootstrap(addrs) 
+} + +func (w *Wrapper) WaitForPushLogByPeerEvent(id peer.ID) error { + return w.node.WaitForPushLogByPeerEvent(id) +} + +func (w *Wrapper) WaitForPushLogFromPeerEvent(id peer.ID) error { + return w.node.WaitForPushLogFromPeerEvent(id) } diff --git a/http/wrapper_tx.go b/tests/clients/http/wrapper_tx.go similarity index 94% rename from http/wrapper_tx.go rename to tests/clients/http/wrapper_tx.go index 7c77b938f5..fe63a9ded5 100644 --- a/http/wrapper_tx.go +++ b/tests/clients/http/wrapper_tx.go @@ -61,6 +61,10 @@ func (w *TxWrapper) Headstore() datastore.DSReaderWriter { return w.server.Headstore() } +func (w *TxWrapper) Peerstore() datastore.DSBatching { + return w.server.Peerstore() +} + func (w *TxWrapper) DAGstore() datastore.DAGStore { return w.server.DAGstore() } diff --git a/tests/integration/backup/one_to_many/export_test.go b/tests/integration/backup/one_to_many/export_test.go index cbba06162b..328d48bd6d 100644 --- a/tests/integration/backup/one_to_many/export_test.go +++ b/tests/integration/backup/one_to_many/export_test.go @@ -57,7 +57,7 @@ func TestBackupExport_AllCollectionsMultipleDocsAndDocUpdate_NoError(t *testing. Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: `{"Book":[{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + ExpectedContent: `{"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}]}`, }, }, } @@ -90,7 +90,7 @@ func TestBackupExport_AllCollectionsMultipleDocsAndMultipleDocUpdate_NoError(t * Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: `{"Book":[{"_key":"bae-4399f189-138d-5d49-9e25-82e78463677b","_newKey":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Game of chains"},{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + ExpectedContent: `{"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_key":"bae-4399f189-138d-5d49-9e25-82e78463677b","_newKey":"bae-78a40f28-a4b8-5dca-be44-392b0f96d0ff","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"Game of 
chains"},{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}]}`, }, }, } diff --git a/tests/integration/backup/one_to_many/import_test.go b/tests/integration/backup/one_to_many/import_test.go index 5cbae18416..f3c189365d 100644 --- a/tests/integration/backup/one_to_many/import_test.go +++ b/tests/integration/backup/one_to_many/import_test.go @@ -43,15 +43,15 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollections_NoError(t *testing Results: []map[string]any{ { "name": "Smith", - "age": uint64(31), + "age": int64(31), }, { "name": "Bob", - "age": uint64(32), + "age": int64(32), }, { "name": "John", - "age": uint64(30), + "age": int64(30), }, }, }, @@ -123,11 +123,11 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoEr Results: []map[string]any{ { "name": "Bob", - "age": uint64(31), + "age": int64(31), }, { "name": "John", - "age": uint64(31), + "age": int64(31), }, }, }, diff --git a/tests/integration/backup/one_to_one/export_test.go b/tests/integration/backup/one_to_one/export_test.go index 48700907a5..c5bb798643 100644 --- a/tests/integration/backup/one_to_one/export_test.go +++ b/tests/integration/backup/one_to_one/export_test.go @@ -57,7 +57,7 @@ func TestBackupExport_AllCollectionsMultipleDocsAndDocUpdate_NoError(t *testing. Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: `{"Book":[{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + ExpectedContent: `{"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_key":"bae-5cf2fec3-d8ed-50d5-8286-39109853d2da","_newKey":"bae-edeade01-2d21-5d6d-aadf-efc5a5279de5","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","name":"John and the sourcerers' stone"}]}`, }, }, } @@ -101,7 +101,7 @@ func TestBackupExport_DoubleReletionship_NoError(t *testing.T) { Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: `{"Book":[{"_key":"bae-45b1def4-4e63-5a93-a1b8-f7b08e682164","_newKey":"bae-add2ccfe-84a1-519c-ab7d-c54b43909532","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-0648f44e-74e8-593b-a662-3310ec278927","name":"John and the sourcerers' stone"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + ExpectedContent: 
`{"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_key":"bae-45b1def4-4e63-5a93-a1b8-f7b08e682164","_newKey":"bae-add2ccfe-84a1-519c-ab7d-c54b43909532","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-0648f44e-74e8-593b-a662-3310ec278927","name":"John and the sourcerers' stone"}]}`, }, }, } @@ -149,7 +149,7 @@ func TestBackupExport_DoubleReletionshipWithUpdate_NoError(t *testing.T) { Doc: `{"age": 31}`, }, testUtils.BackupExport{ - ExpectedContent: `{"Book":[{"_key":"bae-45b1def4-4e63-5a93-a1b8-f7b08e682164","_newKey":"bae-add2ccfe-84a1-519c-ab7d-c54b43909532","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-0648f44e-74e8-593b-a662-3310ec278927","name":"John and the sourcerers' stone"},{"_key":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","_newKey":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","name":"Game of chains"}],"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}]}`, + ExpectedContent: `{"User":[{"_key":"bae-0648f44e-74e8-593b-a662-3310ec278927","_newKey":"bae-0648f44e-74e8-593b-a662-3310ec278927","age":31,"name":"Bob"},{"_key":"bae-e933420a-988a-56f8-8952-6c245aebd519","_newKey":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","age":31,"name":"John"}],"Book":[{"_key":"bae-45b1def4-4e63-5a93-a1b8-f7b08e682164","_newKey":"bae-add2ccfe-84a1-519c-ab7d-c54b43909532","author_id":"bae-807ea028-6c13-5f86-a72b-46e8b715a162","favourite_id":"bae-0648f44e-74e8-593b-a662-3310ec278927","name":"John and the sourcerers' stone"},{"_key":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","_newKey":"bae-da7f2d88-05c4-528a-846a-0d18ab26603b","name":"Game of chains"}]}`, }, }, } diff --git a/tests/integration/backup/one_to_one/import_test.go b/tests/integration/backup/one_to_one/import_test.go index 85c63f9e99..f827c81670 100644 --- a/tests/integration/backup/one_to_one/import_test.go +++ b/tests/integration/backup/one_to_one/import_test.go @@ -43,15 +43,15 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollections_NoError(t *testing Results: []map[string]any{ { "name": "Smith", - "age": uint64(31), + "age": int64(31), }, { "name": "Bob", - "age": uint64(32), + "age": int64(32), }, { "name": "John", - "age": uint64(30), + "age": int64(30), }, }, }, @@ -117,11 +117,11 @@ func TestBackupImport_WithMultipleNoKeyAndMultipleCollectionsAndUpdatedDocs_NoEr Results: []map[string]any{ { "name": "Bob", - "age": uint64(31), + "age": int64(31), }, { "name": "John", - "age": uint64(31), + "age": int64(31), }, }, }, diff --git a/tests/integration/backup/simple/import_test.go b/tests/integration/backup/simple/import_test.go index c6e98a29e8..d7f6428725 100644 --- a/tests/integration/backup/simple/import_test.go +++ b/tests/integration/backup/simple/import_test.go @@ -33,7 +33,7 @@ func TestBackupImport_Simple_NoError(t *testing.T) { Results: []map[string]any{ { "name": "John", - "age": uint64(30), + "age": int64(30), }, }, }, @@ -103,7 +103,7 @@ func TestBackupImport_WithNoKeys_NoError(t *testing.T) { Results: []map[string]any{ { "name": "John", - "age": uint64(30), + "age": int64(30), }, }, }, @@ -134,15 +134,15 @@ func 
TestBackupImport_WithMultipleNoKeys_NoError(t *testing.T) { Results: []map[string]any{ { "name": "Smith", - "age": uint64(31), + "age": int64(31), }, { "name": "Bob", - "age": uint64(32), + "age": int64(32), }, { "name": "John", - "age": uint64(30), + "age": int64(30), }, }, }, diff --git a/tests/integration/change_detector.go b/tests/integration/change_detector.go deleted file mode 100644 index 15f17fb16b..0000000000 --- a/tests/integration/change_detector.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package tests - -import ( - "context" - "fmt" - "io/fs" - "math/rand" - "os" - "os/exec" - "path" - "runtime" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -var skip bool - -func IsDetectingDbChanges() bool { - return DetectDbChanges -} - -// Returns true if test should pass early -func DetectDbChangesPreTestChecks( - t *testing.T, - collectionNames []string, -) bool { - if skip { - t.SkipNow() - } - - if previousTestCaseTestName == t.Name() { - // The database format changer currently only supports running the first test - // case; if a second case is detected we return early - return true - } - previousTestCaseTestName = t.Name() - - if areDatabaseFormatChangesDocumented { - // If we are checking that database formatting changes have been made and - // documented, and changes are documented, then the tests can all pass. - return true - } - - if len(collectionNames) == 0 { - // If the test doesn't specify any collections, then we can't use it to check - // the database format, so we skip it - t.SkipNow() - } - - if !SetupOnly { - dbDirectory := path.Join(rootDatabaseDir, t.Name()) - _, err := os.Stat(dbDirectory) - if os.IsNotExist(err) { - // This is a new test that does not exist in the target branch, we should - // skip it. - t.SkipNow() - } else { - require.NoError(t, err) - } - } - - return false -} - -func detectDbChangesInit(repository string, targetBranch string) { - badgerFile = true - badgerInMemory = false - - if SetupOnly { - // Only the primary test process should perform the setup below - return - } - - defraTempDir := path.Join(os.TempDir(), "defradb") - changeDetectorTempDir := path.Join(defraTempDir, "tests", "changeDetector") - - latestTargetCommitHash := getLatestCommit(repository, targetBranch) - detectDbChangesCodeDir = path.Join(changeDetectorTempDir, "code", latestTargetCommitHash) - r := rand.New(rand.NewSource(time.Now().Unix())) - randNumber := r.Int() - dbsDir := path.Join(changeDetectorTempDir, "dbs", fmt.Sprint(randNumber)) - - testPackagePath, isIntegrationTest := getTestPackagePath() - if !isIntegrationTest { - skip = true - return - } - rootDatabaseDir = path.Join(dbsDir, strings.ReplaceAll(testPackagePath, "/", "_")) - - _, err := os.Stat(detectDbChangesCodeDir) - // Warning - there is a race condition here, where if running multiple packages in - // parallel (as per default) against a new target commit multiple test packages will - // try and clone the target branch at the same time (and will fail). 
- // This could be solved by using a file lock or similar, however running the change - // detector in parallel is significantly slower than running it serially due to machine - // resource constraints, so I am leaving the race condition in and recommending running - // the change detector with the CLI args `-p 1` - if os.IsNotExist(err) { - cloneCmd := exec.Command( - "git", - "clone", - "-b", - targetBranch, - "--single-branch", - repository, - detectDbChangesCodeDir, - ) - cloneCmd.Stdout = os.Stdout - cloneCmd.Stderr = os.Stderr - err := cloneCmd.Run() - if err != nil { - panic(err) - } - } else if err != nil { - panic(err) - } else { - // Cache must be cleaned, or it might not run the test setup! - // Note: this also acts as a race condition if multiple builds are running against the - // same target; if this happens some tests might be silently skipped if the - // child-setup fails. Currently I think it is worth it for slightly faster build - // times, but feel very free to change this! - goTestCacheCmd := exec.Command("go", "clean", "-testcache") - goTestCacheCmd.Dir = detectDbChangesCodeDir - err = goTestCacheCmd.Run() - if err != nil { - panic(err) - } - } - - areDatabaseFormatChangesDocumented = checkIfDatabaseFormatChangesAreDocumented() - if areDatabaseFormatChangesDocumented { - // Don't bother doing anything if the changes are documented - return - } - - targetTestPackage := detectDbChangesCodeDir + "/tests/integration/" + testPackagePath - - _, err = os.Stat(targetTestPackage) - if os.IsNotExist(err) { - // This is a new test package, and thus the change detector is not applicable - // as the tests do not exist in the target branch. - skip = true - return - } else if err != nil { - panic(err) - } - - // If we are checking for database changes, and we are not setting up the database, - // then we must be in the main test process, and need to create a new process - // setting up the database for this test using the old branch. We should not set up - // the database using the current branch/process - goTestCmd := exec.Command( - "go", - "test", - "./...", - "-v", - ) - - goTestCmd.Dir = targetTestPackage - goTestCmd.Env = os.Environ() - goTestCmd.Env = append( - goTestCmd.Env, - setupOnlyEnvName+"=true", - rootDBFilePathEnvName+"="+rootDatabaseDir, - ) - out, err := goTestCmd.Output() - if err != nil { - log.ErrorE(context.TODO(), string(out), err) - panic(err) - } -} - -// getTestPackagePath returns the path to the package currently under test, relative -// to `./tests/integration/`. Will return an empty string and false if the tests -// are not within that directory. 
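(Aside: the race-condition note in detectDbChangesInit above mentions that a file lock could serialize the clone. A minimal sketch of that idea follows; it is hypothetical and not part of the original code. lockPath is an assumed location beside detectDbChangesCodeDir, and the clone callback would re-check for an existing clone once the lock is held.)

package tests

import (
	"os"
	"time"
)

// withCloneLock is a hypothetical helper: it runs clone while holding an
// exclusive lock file, so only one test package clones the target branch
// at a time.
func withCloneLock(lockPath string, clone func() error) error {
	for {
		// O_EXCL guarantees that exactly one process creates the lock file.
		f, err := os.OpenFile(lockPath, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0o644)
		if err == nil {
			f.Close()
			defer os.Remove(lockPath)
			return clone()
		}
		if !os.IsExist(err) {
			return err
		}
		// Another package holds the lock; poll until it is released.
		time.Sleep(500 * time.Millisecond)
	}
}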
-func getTestPackagePath() (string, bool) { - currentTestPackage, err := os.Getwd() - if err != nil { - panic(err) - } - - splitPath := strings.Split( - currentTestPackage, - "/tests/integration/", - ) - - if len(splitPath) != 2 { - return "", false - } - return splitPath[1], true -} - -func checkIfDatabaseFormatChangesAreDocumented() bool { - previousDbChangeFiles, targetDirFound := getDatabaseFormatDocumentation( - detectDbChangesCodeDir, - false, - ) - if !targetDirFound { - panic("Documentation directory not found") - } - - previousDbChanges := make(map[string]struct{}, len(previousDbChangeFiles)) - for _, f := range previousDbChangeFiles { - // Note: we assume flat directory for now - sub directories are not expanded - previousDbChanges[f.Name()] = struct{}{} - } - - _, thisFilePath, _, _ := runtime.Caller(0) - currentDbChanges, currentDirFound := getDatabaseFormatDocumentation(thisFilePath, true) - if !currentDirFound { - panic("Documentation directory not found") - } - - for _, f := range currentDbChanges { - if _, isChangeOld := previousDbChanges[f.Name()]; !isChangeOld { - // If there is a new file in the directory then the change - // has been documented and the test should pass - return true - } - } - - return false -} - -func getDatabaseFormatDocumentation(startPath string, allowDescend bool) ([]fs.DirEntry, bool) { - startInfo, err := os.Stat(startPath) - if err != nil { - panic(err) - } - - var currentDirectory string - if startInfo.IsDir() { - currentDirectory = startPath - } else { - currentDirectory = path.Dir(startPath) - } - - for { - directoryContents, err := os.ReadDir(currentDirectory) - if err != nil { - panic(err) - } - - for _, directoryItem := range directoryContents { - directoryItemPath := path.Join(currentDirectory, directoryItem.Name()) - if directoryItem.Name() == documentationDirectoryName { - probableFormatChangeDirectoryContents, err := os.ReadDir(directoryItemPath) - if err != nil { - panic(err) - } - for _, possibleDocumentationItem := range probableFormatChangeDirectoryContents { - if path.Ext(possibleDocumentationItem.Name()) == ".md" { - // If the directory's name matches the expected, and contains .md files - // we assume it is the documentation directory - return probableFormatChangeDirectoryContents, true - } - } - } else { - if directoryItem.IsDir() { - childContents, directoryFound := getDatabaseFormatDocumentation(directoryItemPath, false) - if directoryFound { - return childContents, true - } - } - } - } - - if allowDescend { - // If not found in this directory, continue down the path - currentDirectory = path.Dir(currentDirectory) - - if currentDirectory == "." || currentDirectory == "/" { - panic("Database documentation directory not found") - } - } else { - return []fs.DirEntry{}, false - } - } -} - -func getLatestCommit(repoName string, branchName string) string { - cmd := exec.Command("git", "ls-remote", repoName, "refs/heads/"+branchName) - result, err := cmd.Output() - if err != nil { - panic(err) - } - - // This is a tab, not a space! - seperator := "\t" - return strings.Split(string(result), seperator)[0] -} diff --git a/tests/integration/cli/client_backup_export_test.go b/tests/integration/cli/client_backup_export_test.go deleted file mode 100644 index 62f2677c7b..0000000000 --- a/tests/integration/cli/client_backup_export_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "os" - "testing" - - "github.com/stretchr/testify/require" -) - -func createUser(t *testing.T, conf DefraNodeConfig) { - _, _ = runDefraCommand(t, conf, []string{ - "client", "query", `mutation { create_User(data: "{\"name\": \"John\"}") { _key } }`, - }) -} - -func TestBackup_IfNoArgs_ShowUsage(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"client", "backup"}) - assertContainsSubstring(t, stdout, "Usage:") -} - -func TestBackupExport_ForAllCollections_ShouldExport(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "export", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "success") - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - require.Equal( - t, - `{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`, - string(b), - ) -} - -func TestBackupExport_ForUserCollection_ShouldExport(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "export", filepath, "--collections", "User", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "success") - - b, err := os.ReadFile(filepath) - require.NoError(t, err) - require.Equal( - t, - `{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`, - string(b), - ) -} - -func TestBackupExport_ForInvalidCollection_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "export", filepath, "--collections", "Invalid", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "collection does not exist") -} - -func TestBackupExport_InvalidFilePath_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/some/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "export", filepath, "--collections", "Invalid", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "invalid file path") -} diff --git a/tests/integration/cli/client_backup_import_test.go b/tests/integration/cli/client_backup_import_test.go deleted file mode 100644 index 8290dbe6de..0000000000 --- a/tests/integration/cli/client_backup_import_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "os" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestBackupImport_WithValidFile_ShouldImport(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - filepath := t.TempDir() + "/test.json" - - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`), - 0644, - ) - require.NoError(t, err) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "import", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "success") -} - -func TestBackupImport_WithExistingDoc_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - err := os.WriteFile( - filepath, - []byte(`{"User":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`), - 0644, - ) - require.NoError(t, err) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "import", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "a document with the given dockey already exists") -} - -func TestBackupImport_ForInvalidCollection_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/test.json" - - err := os.WriteFile( - filepath, - []byte(`{"Invalid":[{"_key":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","_newKey":"bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad","name":"John"}]}`), - 0644, - ) - require.NoError(t, err) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "import", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "failed to get collection: datastore: key not found. Name: Invalid") -} - -func TestBackupImport_InvalidFilePath_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - createUser(t, conf) - - filepath := t.TempDir() + "/some/test.json" - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "backup", "import", filepath, - }) - stopDefra() - - assertContainsSubstring(t, stdout, "invalid file path") -} diff --git a/tests/integration/cli/client_blocks_test.go b/tests/integration/cli/client_blocks_test.go deleted file mode 100644 index 08d1c22684..0000000000 --- a/tests/integration/cli/client_blocks_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package clitest - -import "testing" - -func TestClientBlocksEmpty(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"client", "blocks"}) - assertContainsSubstring(t, stdout, "Usage:") -} - -func TestClientBlocksGetEmpty(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"client", "blocks", "get"}) - assertContainsSubstring(t, stdout, "Usage:") -} - -func TestClientBlocksGetInvalidCID(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - stdout, _ := runDefraCommand(t, conf, []string{"client", "blocks", "get", "invalid-cid"}) - _ = stopDefra() - assertContainsSubstring(t, stdout, "\"errors\"") -} - -func TestClientBlocksGetNonExistentCID(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - stdout, _ := runDefraCommand(t, conf, []string{"client", "blocks", "get", "bafybeieelb43ol5e5jiick2p7k4p577ph72ecwcuowlhbops4hpz24zhz4"}) - _ = stopDefra() - assertContainsSubstring(t, stdout, "could not find") -} diff --git a/tests/integration/cli/client_index_create_test.go b/tests/integration/cli/client_index_create_test.go deleted file mode 100644 index 89d6a4a18a..0000000000 --- a/tests/integration/cli/client_index_create_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" -) - -func createUserCollection(t *testing.T, conf DefraNodeConfig) { - createCollection(t, conf, `type User { name: String }`) -} - -func createCollection(t *testing.T, conf DefraNodeConfig, colSchema string) { - fileName := schemaFileFixture(t, "schema.graphql", colSchema) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fileName}) - assertContainsSubstring(t, stdout, "success") -} - -func TestIndex_IfNoArgs_ShowUsage(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"client", "index"}) - assertContainsSubstring(t, stdout, "Usage:") -} - -func TestIndexCreate_IfNoArgs_ShowUsage(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"client", "index", "create"}) - assertContainsSubstring(t, stderr, "Usage") -} - -func TestIndexCreate_IfNoFieldsArg_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--collection", "User", - }) - stopDefra() - - assertContainsSubstring(t, stderr, "missing argument") -} - -func TestIndexCreate_IfNoCollectionArg_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--fields", "Name", - }) - stopDefra() - - assertContainsSubstring(t, stderr, "missing argument") -} - -func TestIndexCreate_IfCollectionExists_ShouldCreateIndex(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - 
createUserCollection(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--collection", "User", - "--fields", "name", - "--name", "users_name_index", - }) - nodeLog := stopDefra() - - jsonResponse := `{"data":{"index":{"Name":"users_name_index","ID":1,"Fields":[{"Name":"name","Direction":"ASC"}]}}}` - assertContainsSubstring(t, stdout, jsonResponse) - assertNotContainsSubstring(t, stdout, "errors") - assertNotContainsSubstring(t, nodeLog, "errors") -} - -func TestIndexCreate_IfInternalError_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--collection", "User", - "--fields", "Name", - "--name", "users_name_index", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "errors") -} diff --git a/tests/integration/cli/client_index_drop_test.go b/tests/integration/cli/client_index_drop_test.go deleted file mode 100644 index ce03e29524..0000000000 --- a/tests/integration/cli/client_index_drop_test.go +++ /dev/null @@ -1,118 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" -) - -func TestIndexDrop_IfNoArgs_ShowUsage(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"client", "index", "drop"}) - assertContainsSubstring(t, stderr, "Usage") -} - -const userColIndexOnNameFieldName = "users_name_index" - -func createIndexOnName(t *testing.T, conf DefraNodeConfig) { - createIndexOnField(t, conf, "User", "name", userColIndexOnNameFieldName) -} - -func createIndexOnField(t *testing.T, conf DefraNodeConfig, colName, fieldName, indexName string) { - runDefraCommand(t, conf, []string{ - "client", "index", "create", - "--collection", colName, - "--fields", fieldName, - "--name", indexName, - }) -} - -func TestIndexDrop_IfNoNameArg_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - createIndexOnName(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--collection", "User", - }) - stopDefra() - - assertContainsSubstring(t, stderr, "missing argument") -} - -func TestIndexDrop_IfNoCollectionArg_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - createIndexOnName(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--name", "users_name_index", - }) - stopDefra() - - assertContainsSubstring(t, stderr, "missing argument") -} - -func TestIndexDrop_IfCollectionWithIndexExists_ShouldDropIndex(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - createIndexOnName(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--collection", "User", - "--name", "users_name_index", - }) - nodeLog := stopDefra() - - jsonResponse := `{"data":{"result":"success"}}` - assertContainsSubstring(t, stdout, jsonResponse) - assertNotContainsSubstring(t, 
stdout, "errors") - assertNotContainsSubstring(t, nodeLog, "errors") -} - -func TestIndexDrop_IfCollectionDoesNotExist_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--collection", "User", - "--name", "users_name_index", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "errors") -} - -func TestIndexDrop_IfInternalError_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "drop", - "--collection", "User", - "--name", "users_name_index", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "errors") -} diff --git a/tests/integration/cli/client_index_list_test.go b/tests/integration/cli/client_index_list_test.go deleted file mode 100644 index cb2f7d5fac..0000000000 --- a/tests/integration/cli/client_index_list_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "encoding/json" - "testing" - - "github.com/sourcenetwork/defradb/client" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestIndexList_IfCollectionIsNotSpecified_ShouldReturnAllIndexes(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createCollection(t, conf, `type User { name: String }`) - createCollection(t, conf, `type Product { name: String price: Int }`) - createIndexOnField(t, conf, "User", "name", "") - createIndexOnField(t, conf, "Product", "name", "") - createIndexOnField(t, conf, "Product", "price", "") - - stdout, _ := runDefraCommand(t, conf, []string{"client", "index", "list"}) - nodeLog := stopDefra() - - var resp struct { - Data struct { - Collections map[string][]client.IndexDescription `json:"collections"` - } `json:"data"` - } - err := json.Unmarshal([]byte(stdout[0]), &resp) - require.NoError(t, err) - - assert.Equal(t, len(resp.Data.Collections), 2) - assert.Equal(t, len(resp.Data.Collections["User"]), 1) - assert.Equal(t, len(resp.Data.Collections["Product"]), 2) - - assertNotContainsSubstring(t, stdout, "errors") - assertNotContainsSubstring(t, nodeLog, "errors") -} - -func TestIndexList_IfCollectionIsSpecified_ShouldReturnCollectionsIndexes(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - createUserCollection(t, conf) - createIndexOnName(t, conf) - - createCollection(t, conf, `type Product { name: String price: Int }`) - createIndexOnField(t, conf, "Product", "name", "") - createIndexOnField(t, conf, "Product", "price", "") - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "list", - "--collection", "User", - }) - nodeLog := stopDefra() - - var resp struct { - Data struct { - Indexes []client.IndexDescription `json:"indexes"` - } `json:"data"` - } - err := json.Unmarshal([]byte(stdout[0]), &resp) - require.NoError(t, err) - - expectedDesc := client.IndexDescription{Name: userColIndexOnNameFieldName, ID: 1, Fields: 
[]client.IndexedFieldDescription{{Name: "name", Direction: client.Ascending}}} - assert.Equal(t, 1, len(resp.Data.Indexes)) - assert.Equal(t, expectedDesc, resp.Data.Indexes[0]) - - assertNotContainsSubstring(t, stdout, "errors") - assertNotContainsSubstring(t, nodeLog, "errors") -} - -func TestIndexList_IfInternalError_ShouldFail(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "index", "list", - "--collection", "User", - }) - stopDefra() - - assertContainsSubstring(t, stdout, "errors") -} diff --git a/tests/integration/cli/client_peerid_test.go b/tests/integration/cli/client_peerid_test.go deleted file mode 100644 index 0592fd4aa1..0000000000 --- a/tests/integration/cli/client_peerid_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" -) - -func TestPeerID(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "peerid"}) - - defraLogLines := stopDefra() - - assertNotContainsSubstring(t, defraLogLines, "ERROR") - - assertContainsSubstring(t, stdout, "peerID") -} - -func TestPeerIDWithNoHost(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"client", "peerid"}) - assertContainsSubstring(t, stderr, "failed to request PeerID") -} diff --git a/tests/integration/cli/client_ping_test.go b/tests/integration/cli/client_ping_test.go deleted file mode 100644 index a4e1eef96f..0000000000 --- a/tests/integration/cli/client_ping_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package clitest - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/config" -) - -func TestPingSimple(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "ping"}) - - nodeLog := stopDefra() - - assert.Contains(t, stdout, `{"data":{"response":"pong"}}`) - for _, line := range nodeLog { - assert.NotContains(t, line, "ERROR") - } -} - -func TestPingCommandToInvalidHost(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - _, stderr := runDefraCommand(t, conf, []string{"client", "ping", "--url", "'1!2:3!4'"}) - - nodeLog := stopDefra() - - for _, line := range nodeLog { - assert.NotContains(t, line, "ERROR") - } - // for some line in stderr to contain the error message - for _, line := range stderr { - if strings.Contains(line, config.ErrFailedToValidateConfig.Error()) { - return - } - } - t.Error("expected error message not found in stderr") -} - -func TestPingCommandNoHost(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - p, err := findFreePortInRange(t, 49152, 65535) - assert.NoError(t, err) - addr := fmt.Sprintf("localhost:%d", p) - _, stderr := runDefraCommand(t, conf, []string{"client", "ping", "--url", addr}) - assertContainsSubstring(t, stderr, "failed to send ping") -} diff --git a/tests/integration/cli/client_query_test.go b/tests/integration/cli/client_query_test.go deleted file mode 100644 index 6ca98cbade..0000000000 --- a/tests/integration/cli/client_query_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package clitest - -import ( - "testing" -) - -func TestRequestSimple(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "query", - "query IntrospectionQuery {__schema {queryType { name }}}", - }) - nodeLog := stopDefra() - - assertContainsSubstring(t, stdout, "Query") - assertNotContainsSubstring(t, nodeLog, "ERROR") -} - -func TestRequestInvalidQuery(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "query", "{}}"}) - _ = stopDefra() - - assertContainsSubstring(t, stdout, "Syntax Error") -} - -func TestRequestWithErrorNoType(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - stdout, _ := runDefraCommand(t, conf, []string{"client", "query", "query { User { whatever } }"}) - - assertContainsSubstring(t, stdout, "Cannot query field") -} - -func TestRequestWithErrorNoField(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - fname := schemaFileFixture(t, "schema.graphql", ` - type User { - id: ID - name: String - }`) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", "query { User { nonexistent } }"}) - - assertContainsSubstring(t, stdout, `Cannot query field \"nonexistent\"`) -} - -func TestRequestQueryFromFile(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - fname := schemaFileFixture(t, "schema.graphql", ` - type User123 { - XYZ: String - }`) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - assertContainsSubstring(t, stdout, "success") - - fname = schemaFileFixture(t, "query.graphql", ` - query { - __schema { - types { - name - fields { - name - type { - name - kind - } - } - } - } - }`) - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", "-f", fname}) - - assertContainsSubstring(t, stdout, "Query") - - // Check that the User type is correctly returned - assertContainsSubstring(t, stdout, "User123") - assertContainsSubstring(t, stdout, "XYZ") -} diff --git a/tests/integration/cli/client_rpc_p2p_collection_test.go b/tests/integration/cli/client_rpc_p2p_collection_test.go deleted file mode 100644 index b44abcaefb..0000000000 --- a/tests/integration/cli/client_rpc_p2p_collection_test.go +++ /dev/null @@ -1,13 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -// TBD diff --git a/tests/integration/cli/client_rpc_replicator_test.go b/tests/integration/cli/client_rpc_replicator_test.go deleted file mode 100644 index 1fd0e3c351..0000000000 --- a/tests/integration/cli/client_rpc_replicator_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "fmt" - "testing" -) - -func TestReplicatorGetAllEmpty(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - portTCP, err := findFreePortInRange(t, 49152, 65535) - if err != nil { - t.Fatal(err) - } - conf.GRPCAddr = fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", portTCP) - if err != nil { - t.Fatal(err) - } - - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - tcpAddr := fmt.Sprintf("localhost:%d", portTCP) - _, stderr := runDefraCommand(t, conf, []string{"client", "--addr", tcpAddr, "rpc", "replicator", "getall"}) - assertContainsSubstring(t, stderr, "No replicator found") -} diff --git a/tests/integration/cli/client_schema_add_test.go b/tests/integration/cli/client_schema_add_test.go deleted file mode 100644 index 12d2e5e539..0000000000 --- a/tests/integration/cli/client_schema_add_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestAddSchemaFromFile(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - fname := schemaFileFixture(t, "schema.graphql", ` - type User { - id: ID - name: String - }`) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - - nodeLog := stopDefra() - - jsonReponse := `{"data":{"collections":[{"name":"User","id":"bafkreib5hb7mr7ecbdufd7mvv6va6mpxukjai7hpnqkhxonnw7lzwfqlja","version_id":"bafkreib5hb7mr7ecbdufd7mvv6va6mpxukjai7hpnqkhxonnw7lzwfqlja"}],"result":"success"}}` - assert.Contains(t, stdout, jsonReponse) - assertNotContainsSubstring(t, nodeLog, "ERROR") -} - -func TestAddSchemaWithDuplicateType(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - fname1 := schemaFileFixture(t, "schema1.graphql", `type Post { id: ID title: String }`) - fname2 := schemaFileFixture(t, "schema2.graphql", `type Post { id: ID author: String }`) - - stdout1, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname1}) - stdout2, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname2}) - - _ = stopDefra() - - jsonReponse := `{"data":{"collections":[{"name":"Post","id":"bafkreicgpbla5wlogpinnm32arcqzptusdc5tzdznipqrf6nkroav6b25a","version_id":"bafkreicgpbla5wlogpinnm32arcqzptusdc5tzdznipqrf6nkroav6b25a"}],"result":"success"}}` - assertContainsSubstring(t, stdout1, jsonReponse) - assertContainsSubstring(t, stdout2, `schema type already exists. Name: Post`) -} diff --git a/tests/integration/cli/client_schema_migration_get_test.go b/tests/integration/cli/client_schema_migration_get_test.go deleted file mode 100644 index dd70879433..0000000000 --- a/tests/integration/cli/client_schema_migration_get_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. 
-// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "fmt" - "testing" - - "github.com/sourcenetwork/defradb/tests/lenses" -) - -func TestSchemaMigrationGet_GivenOneArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - "notAnArg", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "too many arguments. Max: 0, Actual: 1") -} - -func TestSchemaMigrationGet_GivenNoMigrations_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, `{"data":{"configuration":[]}}`) -} - -func TestSchemaMigrationGet_GivenEmptyMigrationObj_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "{}", - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, - `{"data":{"configuration":[{"SourceSchemaVersionID":"bae123","DestinationSchemaVersionID":"bae456","Lenses":null}]}}`, - ) -} - -func TestSchemaMigrationGet_GivenEmptyMigration_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"lenses": []}`, - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, - `{"data":{"configuration":[{"SourceSchemaVersionID":"bae123","DestinationSchemaVersionID":"bae456","Lenses":[]}]}}`, - ) -} - -func TestSchemaMigrationGet_GivenMigration_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", - fmt.Sprintf(`{"lenses": [{"path":"%s","arguments":{"dst":"verified","value":true}}]}`, lenses.SetDefaultModulePath), - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "get", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, - `{"data":{"configuration":[{"SourceSchemaVersionID":"bae123","DestinationSchemaVersionID":"bae456","Lenses":[`+ - fmt.Sprintf( - `{"Path":"%s",`, - lenses.SetDefaultModulePath, - )+ - `"Inverse":false,"Arguments":{"dst":"verified","value":true}}`+ - `]}]}}`, - ) -} diff --git a/tests/integration/cli/client_schema_migration_set_test.go b/tests/integration/cli/client_schema_migration_set_test.go deleted file mode 100644 index d97a4e77d8..0000000000 --- a/tests/integration/cli/client_schema_migration_set_test.go +++ /dev/null @@ -1,244 +0,0 @@ -// Copyright 2023 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License 
-// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "fmt" - "testing" - - "github.com/sourcenetwork/defradb/tests/lenses" -) - -func TestSchemaMigrationSet_GivenEmptyArgs_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{"client", "schema", "migration", "set"}) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing arguments. Required: src, dst, cfg") -} - -func TestSchemaMigrationSet_GivenOneArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing arguments. Required: src, dst, cfg") -} - -func TestSchemaMigrationSet_GivenTwoArgs_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing argument. Name: cfg") -} - -func TestSchemaMigrationSet_GivenFourArgs_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "cfg", "extraArg", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "too many arguments. Max: 3, Actual: 4") -} - -func TestSchemaMigrationSet_GivenEmptySrcArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "", "bae", "path", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing argument. Name: src") -} - -func TestSchemaMigrationSet_GivenEmptyDstArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae", "", "path", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing argument. Name: dst") -} - -func TestSchemaMigrationSet_GivenEmptyCfgArg_ShouldReturnError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "missing argument. 
Name: cfg") -} - -func TestSchemaMigrationSet_GivenInvalidCfgJsonObject_ShouldError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "{--notvalidjson", - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "invalid lens configuration: invalid character") -} - -func TestSchemaMigrationSet_GivenEmptyCfgObject_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", "{}", - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, "success") -} - -func TestSchemaMigrationSet_GivenCfgWithNoLenses_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"lenses": []}`, - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, "success") -} - -func TestSchemaMigrationSet_GivenCfgWithNoLensesUppercase_ShouldSucceed(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"Lenses": []}`, - }) - _ = stopDefra() - - assertContainsSubstring(t, stdout, "success") -} - -func TestSchemaMigrationSet_GivenCfgWithUnknownProp_ShouldError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"NotAProp": []}`, - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "invalid lens configuration: json: unknown field") -} - -func TestSchemaMigrationSet_GivenCfgWithUnknownPath_ShouldError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - _, stderr := runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bae123", "bae456", `{"Lenses": [{"path":"notAPath"}]}`, - }) - _ = stopDefra() - - assertContainsSubstring(t, stderr, "no such file or directory") -} - -func TestSchemaMigrationSet_GivenCfgWithLenses_ShouldSucceedAndMigrateDoc(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", `type Users { name: String }`}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", `mutation { create_Users(data:"{\"name\":\"John\"}") { name } }`}) - assertContainsSubstring(t, stdout, `{"data":[{"name":"John"}]}`) - - stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", - `[{ "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} }]`, - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", - fmt.Sprintf(`{"lenses": [{"path":"%s","arguments":{"dst":"verified","value":true}}]}`, lenses.SetDefaultModulePath), - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, 
[]string{"client", "query", "query { Users { name verified } }"}) - _ = stopDefra() - - assertContainsSubstring(t, stdout, `{"data":[{"name":"John","verified":true}]}`) -} - -func TestSchemaMigrationSet_GivenCfgWithLenseError_ShouldError(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", `type Users { name: String }`}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", `mutation { create_Users(data:"{\"name\":\"John\"}") { name } }`}) - assertContainsSubstring(t, stdout, `{"data":[{"name":"John"}]}`) - - stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", - `[{ "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} }]`, - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{ - "client", "schema", "migration", "set", - "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", - // Do not set lens parameters in order to generate error - fmt.Sprintf(`{"lenses": [{"path":"%s"}]}`, lenses.SetDefaultModulePath), - }) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", "query { Users { name verified } }"}) - _ = stopDefra() - - // Error generated from within lens module lazily executing within the query - assertContainsSubstring(t, stdout, "Parameters have not been set.") -} diff --git a/tests/integration/cli/client_schema_patch_test.go b/tests/integration/cli/client_schema_patch_test.go deleted file mode 100644 index 487dc9eda5..0000000000 --- a/tests/integration/cli/client_schema_patch_test.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
- -package clitest - -import ( - "testing" -) - -func TestClientSchemaPatch(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - fname := schemaFileFixture(t, "schema.graphql", ` - type User { - id: ID - name: String - }`) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", `[{ "op": "add", "path": "/User/Schema/Fields/-", "value": {"Name": "address", "Kind": "String"} }]`}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "query", `query IntrospectionQuery { __type (name: "User") { fields { name } }}`}) - assertContainsSubstring(t, stdout, "address") -} - -func TestClientSchemaPatch_InvalidJSONPatch(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stopDefra := runDefraNode(t, conf) - defer stopDefra() - - fname := schemaFileFixture(t, "schema.graphql", ` - type User { - id: ID - name: String - } - `) - stdout, _ := runDefraCommand(t, conf, []string{"client", "schema", "add", "-f", fname}) - assertContainsSubstring(t, stdout, "success") - - stdout, _ = runDefraCommand(t, conf, []string{"client", "schema", "patch", `[{ "op": "invalidOp" }]`}) - assertContainsSubstring(t, stdout, "Internal Server Error") -} diff --git a/tests/integration/cli/init_test.go b/tests/integration/cli/init_test.go deleted file mode 100644 index 7292d920c3..0000000000 --- a/tests/integration/cli/init_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/config" -) - -// Executing init command creates valid config file. -func TestCLIInitCommand(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir}) - cfgfilePath := filepath.Join(conf.rootDir, config.DefaultConfigFileName) - assertContainsSubstring(t, stderr, "Created config file at "+cfgfilePath) - if !assert.FileExists(t, cfgfilePath) { - t.Fatal("Config file not created") - } -} - -func TestCLIInitCommandTwiceErrors(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - cfgfilePath := filepath.Join(conf.rootDir, config.DefaultConfigFileName) - _, stderr := runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir}) - assertContainsSubstring(t, stderr, "Created config file at "+cfgfilePath) - _, stderr = runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir}) - assertContainsSubstring(t, stderr, "Configuration file already exists at "+cfgfilePath) -} - -// Executing init command twice, but second time reinitializing. 
-func TestInitCommandTwiceReinitialize(t *testing.T) {
-	conf := NewDefraNodeDefaultConfig(t)
-	cfgfilePath := filepath.Join(conf.rootDir, config.DefaultConfigFileName)
-	_, stderr := runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir})
-	assertContainsSubstring(t, stderr, "Created config file at "+cfgfilePath)
-	_, stderr = runDefraCommand(t, conf, []string{"init", "--rootdir", conf.rootDir, "--reinitialize"})
-	assertContainsSubstring(t, stderr, "Deleted config file at "+cfgfilePath)
-	assertContainsSubstring(t, stderr, "Created config file at "+cfgfilePath)
-}
diff --git a/tests/integration/cli/log_config_test.go b/tests/integration/cli/log_config_test.go
deleted file mode 100644
index 55d1b18154..0000000000
--- a/tests/integration/cli/log_config_test.go
+++ /dev/null
@@ -1,116 +0,0 @@
-// Copyright 2022 Democratized Data Foundation
-//
-// Use of this software is governed by the Business Source License
-// included in the file licenses/BSL.txt.
-//
-// As of the Change Date specified in that file, in accordance with
-// the Business Source License, use of this software will be governed
-// by the Apache License, Version 2.0, included in the file
-// licenses/APL.txt.
-
-package clitest
-
-import (
-	"bufio"
-	"bytes"
-	"context"
-	"fmt"
-	"io"
-	"os"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"github.com/sourcenetwork/defradb/cli"
-	"github.com/sourcenetwork/defradb/config"
-	"github.com/sourcenetwork/defradb/logging"
-)
-
-const (
-	testLogger1 = "testLogger1"
-	testLogger2 = "testLogger2"
-	testLogger3 = "testLogger3"
-)
-
-var (
-	log1 = logging.MustNewLogger(testLogger1)
-	log2 = logging.MustNewLogger(testLogger2)
-	log3 = logging.MustNewLogger(testLogger3)
-)
-
-func TestCLILogsToStderrGivenNamedLogLevel(t *testing.T) {
-	ctx := context.Background()
-	logLines := captureLogLines(
-		t,
-		func() {
-			// set the log levels
-			// general: error
-			// testLogger1: debug
-			// testLogger2: info
-			os.Args = append(os.Args, "--loglevel")
-			os.Args = append(os.Args, fmt.Sprintf("%s,%s=debug,%s=info", "error", testLogger1, testLogger2))
-		},
-		func() {
-			log1.Error(ctx, "error")
-			log1.Debug(ctx, "debug")
-			log2.Info(ctx, "info")
-			log3.Debug(ctx, "debug") // won't print, as logger3 will use global level defined above as 'error'
-			log3.Info(ctx, "info")   // won't print, as logger3 will use global level defined above as 'error'
-		},
-	)
-
-	assert.Len(t, logLines, 3)
-}
-
-func captureLogLines(t *testing.T, setup func(), predicate func()) []string {
-	r, w, err := os.Pipe()
-	if err != nil {
-		t.Fatal(err)
-	}
-	stderr := os.Stderr
-	os.Stderr = w
-	defer func() {
-		os.Stderr = stderr
-	}()
-
-	directory := t.TempDir()
-
-	// Set the default logger output path to a file in the temp dir
-	// so that production logs don't pollute and confuse the tests
-	// os.Args = append(os.Args, "--logoutput", directory+"/log.txt")
-	os.Args = append(os.Args, "init", "--rootdir", directory)
-
-	setup()
-	cfg := config.DefaultConfig()
-	defraCmd := cli.NewDefraCommand(cfg)
-	if err := defraCmd.Execute(context.Background()); err != nil {
-		t.Fatal(err)
-	}
-	predicate()
-	log1.Flush()
-	log2.Flush()
-	log3.Flush()
-
-	w.Close()
-	var buf bytes.Buffer
-	_, _ = io.Copy(&buf, r)
-	logLines, err := parseLines(&buf)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	return logLines
-}
-
-func parseLines(r io.Reader) ([]string, error) {
-	fileScanner := bufio.NewScanner(r)
-
-	fileScanner.Split(bufio.ScanLines)
-
-	logLines := []string{}
-	for fileScanner.Scan() {
-		logLines = append(logLines,
fileScanner.Text()) - } - - return logLines, nil -} diff --git a/tests/integration/cli/root_test.go b/tests/integration/cli/root_test.go deleted file mode 100644 index 33df29fc4d..0000000000 --- a/tests/integration/cli/root_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestRootCommandEmptyRootDir(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{}) - assert.Contains(t, stdout, "Usage:") -} - -func TestRootCommandRootDirWithDefaultConfig(t *testing.T) { - conf := DefraNodeConfig{ - logPath: t.TempDir(), - } - stdout, _ := runDefraCommand(t, conf, []string{}) - assert.Contains(t, stdout, "Usage:") -} - -func TestRootCommandRootDirFromEnv(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{}) - assert.Contains(t, stdout, "Usage:") -} - -func TestRootCommandRootWithNonexistentFlag(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - stdout, _ := runDefraCommand(t, conf, []string{"--foo"}) - assert.Contains(t, stdout, "Usage:") -} diff --git a/tests/integration/cli/serverdump_test.go b/tests/integration/cli/serverdump_test.go deleted file mode 100644 index ed8fcd4d9f..0000000000 --- a/tests/integration/cli/serverdump_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "testing" -) - -func TestServerDumpMemoryErrs(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"server-dump", "--store", "memory"}) - assertContainsSubstring(t, stderr, "server-side dump is only supported for the Badger datastore") -} - -func TestServerDumpInvalidStoreErrs(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{"server-dump", "--store", "invalid"}) - // assertContainsSubstring(t, stderr, "invalid datastore type") - assertContainsSubstring(t, stderr, "server-side dump is only supported for the Badger datastore") -} diff --git a/tests/integration/cli/start_test.go b/tests/integration/cli/start_test.go deleted file mode 100644 index 1a6267f190..0000000000 --- a/tests/integration/cli/start_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. 
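// The log-capture helpers above (captureLogLines here, captureOutput in utils.go)
// share one idiom: swap os.Stderr/os.Stdout for the write end of an os.Pipe, then
// scan the read end. A minimal standalone sketch of that idiom (assumes the output
// is small enough to fit the pipe buffer before the read begins):
package main

import (
	"bufio"
	"fmt"
	"os"
)

func main() {
	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	orig := os.Stderr
	os.Stderr = w

	fmt.Fprintln(os.Stderr, "captured line")

	// Restore stderr and close the writer so the scanner sees EOF.
	os.Stderr = orig
	w.Close()

	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		fmt.Println("got:", scanner.Text())
	}
}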
- -package clitest - -import ( - "fmt" - "testing" -) - -func TestStartCommandBasic(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--url", conf.APIURL, - "--tcpaddr", conf.GRPCAddr, - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - assertNotContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithTLSIncomplete(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--tls", - "--url", conf.APIURL, - "--tcpaddr", conf.GRPCAddr, - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - assertContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithStoreMemory(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", "--store", "memory", - "--url", conf.APIURL, - "--tcpaddr", conf.GRPCAddr, - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - assertContainsSubstring(t, stderr, "Building new memory store") - assertNotContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithP2PAddr(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - p2pport, err := findFreePortInRange(t, 49152, 65535) - if err != nil { - t.Fatal(err) - } - addr := fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", p2pport) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--p2paddr", addr, - "--url", conf.APIURL, - "--tcpaddr", conf.GRPCAddr, - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - logstring := fmt.Sprintf("Starting P2P node, {\"P2P address\": \"%s\"}", addr) - assertContainsSubstring(t, stderr, logstring) - assertNotContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithNoP2P(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--no-p2p", - }) - assertContainsSubstring(t, stderr, "Starting DefraDB service...") - assertNotContainsSubstring(t, stderr, "Starting P2P node") - assertNotContainsSubstring(t, stderr, "Error") -} - -func TestStartCommandWithInvalidStoreType(t *testing.T) { - conf := NewDefraNodeDefaultConfig(t) - _, stderr := runDefraCommand(t, conf, []string{ - "start", - "--store", "invalid", - }) - assertContainsSubstring(t, stderr, "failed to load config: failed to validate config: invalid store type") -} diff --git a/tests/integration/cli/utils.go b/tests/integration/cli/utils.go deleted file mode 100644 index c94ce222dc..0000000000 --- a/tests/integration/cli/utils.go +++ /dev/null @@ -1,263 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -/* -Package clitest provides a testing framework for the Defra CLI, along with CLI integration tests. 
-*/ -package clitest - -import ( - "bufio" - "bytes" - "context" - "errors" - "fmt" - "io" - "math/rand" - "net" - "os" - "path/filepath" - "strings" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/sourcenetwork/defradb/cli" - "github.com/sourcenetwork/defradb/config" -) - -const COMMAND_TIMEOUT_SECONDS = 2 * time.Second -const SUBCOMMAND_TIME_BUFFER_SECONDS = 200 * time.Millisecond - -type DefraNodeConfig struct { - rootDir string - logPath string - APIURL string - GRPCAddr string -} - -func NewDefraNodeDefaultConfig(t *testing.T) DefraNodeConfig { - t.Helper() - portAPI, err := findFreePortInRange(t, 49152, 65535) - if err != nil { - t.Fatal(err) - } - portGRPC, err := findFreePortInRange(t, 49152, 65535) - if err != nil { - t.Fatal(err) - } - - return DefraNodeConfig{ - rootDir: t.TempDir(), - logPath: "", - APIURL: fmt.Sprintf("localhost:%d", portAPI), - GRPCAddr: fmt.Sprintf("/ip4/0.0.0.0/tcp/%d", portGRPC), - } -} - -// runDefraNode runs a defra node in a separate goroutine and returns a stopping function -// which also returns the node's execution log lines. -func runDefraNode(t *testing.T, conf DefraNodeConfig) func() []string { - t.Helper() - - if conf.logPath == "" { - conf.logPath = filepath.Join(t.TempDir(), "defra.log") - } - - var args []string - if conf.rootDir != "" { - args = append(args, "--rootdir", conf.rootDir) - } - if conf.APIURL != "" { - args = append(args, "--url", conf.APIURL) - } - if conf.GRPCAddr != "" { - args = append(args, "--tcpaddr", conf.GRPCAddr) - } - args = append(args, "--logoutput", conf.logPath) - - cfg := config.DefaultConfig() - ctx, cancel := context.WithCancel(context.Background()) - ready := make(chan struct{}) - go func(ready chan struct{}) { - defraCmd := cli.NewDefraCommand(cfg) - defraCmd.RootCmd.SetArgs( - append([]string{"start"}, args...), - ) - ready <- struct{}{} - err := defraCmd.Execute(ctx) - assert.NoError(t, err) - }(ready) - <-ready - time.Sleep(SUBCOMMAND_TIME_BUFFER_SECONDS) - cancelAndOutput := func() []string { - cancel() - time.Sleep(SUBCOMMAND_TIME_BUFFER_SECONDS) - lines, err := readLoglines(t, conf.logPath) - assert.NoError(t, err) - return lines - } - return cancelAndOutput -} - -// Runs a defra command and returns the stdout and stderr output. -func runDefraCommand(t *testing.T, conf DefraNodeConfig, args []string) (stdout, stderr []string) { - t.Helper() - cfg := config.DefaultConfig() - args = append([]string{ - "--url", conf.APIURL, - }, args...) 
-	if !contains(args, "--rootdir") {
-		args = append(args, "--rootdir", t.TempDir())
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), COMMAND_TIMEOUT_SECONDS)
-	defer cancel()
-
-	stdout, stderr = captureOutput(func() {
-		defraCmd := cli.NewDefraCommand(cfg)
-		t.Log("executing defra command with args", args)
-		defraCmd.RootCmd.SetArgs(args)
-		_ = defraCmd.Execute(ctx)
-	})
-	return stdout, stderr
-}
-
-func contains(args []string, arg string) bool {
-	for _, a := range args {
-		if a == arg {
-			return true
-		}
-	}
-	return false
-}
-
-func readLoglines(t *testing.T, fpath string) ([]string, error) {
-	f, err := os.Open(fpath)
-	if err != nil {
-		return nil, err
-	}
-	defer f.Close() //nolint:errcheck
-	scanner := bufio.NewScanner(f)
-	lines := make([]string, 0)
-	for scanner.Scan() {
-		lines = append(lines, scanner.Text())
-	}
-	err = scanner.Err()
-	assert.NoError(t, err)
-	return lines, nil
-}
-
-func captureOutput(f func()) (stdout, stderr []string) {
-	oldStdout := os.Stdout
-	oldStderr := os.Stderr
-	rStdout, wStdout, err := os.Pipe()
-	if err != nil {
-		panic(err)
-	}
-	rStderr, wStderr, err := os.Pipe()
-	if err != nil {
-		panic(err)
-	}
-	os.Stdout = wStdout
-	os.Stderr = wStderr
-
-	f()
-
-	if err := wStdout.Close(); err != nil {
-		panic(err)
-	}
-	if err := wStderr.Close(); err != nil {
-		panic(err)
-	}
-
-	os.Stdout = oldStdout
-	os.Stderr = oldStderr
-
-	var stdoutBuf, stderrBuf bytes.Buffer
-	if _, err := io.Copy(&stdoutBuf, rStdout); err != nil {
-		panic(err)
-	}
-	if _, err := io.Copy(&stderrBuf, rStderr); err != nil {
-		panic(err)
-	}
-
-	stdout = strings.Split(strings.TrimSuffix(stdoutBuf.String(), "\n"), "\n")
-	stderr = strings.Split(strings.TrimSuffix(stderrBuf.String(), "\n"), "\n")
-
-	return
-}
-
-var portsInUse = make(map[int]struct{})
-var portMutex = sync.Mutex{}
-
-// findFreePortInRange returns a free port in the range [minPort, maxPort].
-// The range of ports that are infrequently used is [49152, 65535].
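// An alternative worth noting (a sketch under assumptions, not part of this
// package): instead of probing random ports, a test can ask the kernel for a
// free ephemeral port by listening on port 0. Like any free-port helper it is
// still racy between Close and reuse:
//
//	func freePort() (int, error) {
//		l, err := net.Listen("tcp", "127.0.0.1:0")
//		if err != nil {
//			return 0, err
//		}
//		defer l.Close() //nolint:errcheck
//		return l.Addr().(*net.TCPAddr).Port, nil
//	}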
-func findFreePortInRange(t *testing.T, minPort, maxPort int) (int, error) { - if minPort < 1 || maxPort > 65535 || minPort > maxPort { - return 0, errors.New("invalid port range") - } - - const maxAttempts = 100 - for i := 0; i < maxAttempts; i++ { - port := rand.Intn(maxPort-minPort+1) + minPort - if _, ok := portsInUse[port]; ok { - continue - } - addr := fmt.Sprintf("127.0.0.1:%d", port) - listener, err := net.Listen("tcp", addr) - if err == nil { - portMutex.Lock() - portsInUse[port] = struct{}{} - portMutex.Unlock() - t.Cleanup(func() { - portMutex.Lock() - delete(portsInUse, port) - portMutex.Unlock() - }) - _ = listener.Close() - return port, nil - } - } - - return 0, errors.New("unable to find a free port") -} - -func assertContainsSubstring(t *testing.T, haystack []string, substring string) { - t.Helper() - if !containsSubstring(haystack, substring) { - t.Fatalf("expected %q to contain %q", haystack, substring) - } -} - -func assertNotContainsSubstring(t *testing.T, haystack []string, substring string) { - t.Helper() - if containsSubstring(haystack, substring) { - t.Fatalf("expected %q to not contain %q", haystack, substring) - } -} - -func containsSubstring(haystack []string, substring string) bool { - for _, s := range haystack { - if strings.Contains(s, substring) { - return true - } - } - return false -} - -func schemaFileFixture(t *testing.T, fname string, schema string) string { - absFname := filepath.Join(t.TempDir(), fname) - err := os.WriteFile(absFname, []byte(schema), 0644) - assert.NoError(t, err) - return absFname -} diff --git a/tests/integration/cli/version_test.go b/tests/integration/cli/version_test.go deleted file mode 100644 index bc9c2a7e25..0000000000 --- a/tests/integration/cli/version_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2022 Democratized Data Foundation -// -// Use of this software is governed by the Business Source License -// included in the file licenses/BSL.txt. -// -// As of the Change Date specified in that file, in accordance with -// the Business Source License, use of this software will be governed -// by the Apache License, Version 2.0, included in the file -// licenses/APL.txt. - -package clitest - -import ( - "encoding/json" - "strings" - "testing" - - "github.com/stretchr/testify/assert" -) - -// note: this assumes the version information *without* build-time info integrated. 
-func TestExecVersion(t *testing.T) {
-	conf := NewDefraNodeDefaultConfig(t)
-	stdout, stderr := runDefraCommand(t, conf, []string{"version"})
-	for _, line := range stderr {
-		assert.NotContains(t, line, "ERROR")
-	}
-	output := strings.Join(stdout, " ")
-	assert.Contains(t, output, "defradb")
-	assert.Contains(t, output, "built with Go")
-}
-
-func TestExecVersionJSON(t *testing.T) {
-	conf := NewDefraNodeDefaultConfig(t)
-	stdout, stderr := runDefraCommand(t, conf, []string{"version", "--format", "json"})
-	for _, line := range stderr {
-		assert.NotContains(t, line, "ERROR")
-	}
-	output := strings.Join(stdout, " ")
-	assert.Contains(t, output, "go\":")
-	assert.Contains(t, output, "commit\":")
-	assert.Contains(t, output, "commitdate\":")
-	var data map[string]any
-	err := json.Unmarshal([]byte(output), &data)
-	assert.NoError(t, err)
-}
diff --git a/tests/integration/client.go b/tests/integration/client.go
new file mode 100644
index 0000000000..1d06bfc744
--- /dev/null
+++ b/tests/integration/client.go
@@ -0,0 +1,85 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package tests
+
+import (
+	"fmt"
+	"os"
+	"strconv"
+
+	"github.com/sourcenetwork/defradb/net"
+	"github.com/sourcenetwork/defradb/tests/clients"
+	"github.com/sourcenetwork/defradb/tests/clients/cli"
+	"github.com/sourcenetwork/defradb/tests/clients/http"
+)
+
+const (
+	clientGoEnvName   = "DEFRA_CLIENT_GO"
+	clientHttpEnvName = "DEFRA_CLIENT_HTTP"
+	clientCliEnvName  = "DEFRA_CLIENT_CLI"
+)
+
+type ClientType string
+
+const (
+	// GoClientType enables running the test suite using
+	// the go implementation of the client.DB interface.
+	GoClientType ClientType = "go"
+	// HTTPClientType enables running the test suite using
+	// the http implementation of the client.DB interface.
+	HTTPClientType ClientType = "http"
+	// CLIClientType enables running the test suite using
+	// the cli implementation of the client.DB interface.
+	CLIClientType ClientType = "cli"
+)
+
+var (
+	httpClient bool
+	goClient   bool
+	cliClient  bool
+)
+
+func init() {
+	// We use environment variables instead of flags because `go test ./...`
+	// throws for all packages that don't have the flag defined
+	httpClient, _ = strconv.ParseBool(os.Getenv(clientHttpEnvName))
+	goClient, _ = strconv.ParseBool(os.Getenv(clientGoEnvName))
+	cliClient, _ = strconv.ParseBool(os.Getenv(clientCliEnvName))
+
+	if !goClient && !httpClient && !cliClient {
+		// Default is to test go client type.
+		goClient = true
+	}
+}
+
+// setupClient returns the client implementation for the current
+// testing state. The client type on the test state is used to
+// select the client implementation to use.
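// Illustrative usage (an assumption about invocation, not defined in this diff):
// running the suite against the http client looks like
// `DEFRA_CLIENT_HTTP=true go test ./tests/integration/...`.
// Note that strconv.ParseBool of an unset (empty) variable returns false along
// with an error, which is why the ignored-error form in init above leaves every
// client flag false until its environment variable is explicitly set:
//
//	v, err := strconv.ParseBool("")     // v == false, err != nil
//	v, err = strconv.ParseBool("true")  // v == true, err == nil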
+func setupClient(s *state, node *net.Node) (impl clients.Client, err error) {
+	switch s.clientType {
+	case HTTPClientType:
+		impl, err = http.NewWrapper(node)
+
+	case CLIClientType:
+		impl, err = cli.NewWrapper(node)
+
+	case GoClientType:
+		impl = node
+
+	default:
+		err = fmt.Errorf("invalid client type: %v", s.clientType)
+	}
+
+	if err != nil {
+		return nil, err
+	}
+	return
+}
diff --git a/tests/integration/collection/update/simple/with_keys_test.go b/tests/integration/collection/update/simple/with_keys_test.go
index 63f9ce7b55..d36e140852 100644
--- a/tests/integration/collection/update/simple/with_keys_test.go
+++ b/tests/integration/collection/update/simple/with_keys_test.go
@@ -160,7 +160,7 @@ func TestUpdateWithKeys(t *testing.T) {
 					return err
 				}
 
-				assert.Equal(t, uint64(40), name)
+				assert.Equal(t, int64(40), name)
 
 				d2, err := c.Get(ctx, doc2.Key(), false)
 				if err != nil {
@@ -172,7 +172,7 @@ func TestUpdateWithKeys(t *testing.T) {
 					return err
 				}
 
-				assert.Equal(t, uint64(40), name2)
+				assert.Equal(t, int64(40), name2)
 
 				return nil
 			},
diff --git a/tests/integration/db.go b/tests/integration/db.go
new file mode 100644
index 0000000000..b103f656b3
--- /dev/null
+++ b/tests/integration/db.go
@@ -0,0 +1,151 @@
+// Copyright 2023 Democratized Data Foundation
+//
+// Use of this software is governed by the Business Source License
+// included in the file licenses/BSL.txt.
+//
+// As of the Change Date specified in that file, in accordance with
+// the Business Source License, use of this software will be governed
+// by the Apache License, Version 2.0, included in the file
+// licenses/APL.txt.
+
+package tests
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strconv"
+	"testing"
+
+	badger "github.com/sourcenetwork/badger/v4"
+
+	"github.com/sourcenetwork/defradb/client"
+	badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4"
+	"github.com/sourcenetwork/defradb/datastore/memory"
+	"github.com/sourcenetwork/defradb/db"
+	changeDetector "github.com/sourcenetwork/defradb/tests/change_detector"
+)
+
+type DatabaseType string
+
+const (
+	memoryBadgerEnvName   = "DEFRA_BADGER_MEMORY"
+	fileBadgerEnvName     = "DEFRA_BADGER_FILE"
+	fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH"
+	inMemoryEnvName       = "DEFRA_IN_MEMORY"
+)
+
+const (
+	badgerIMType   DatabaseType = "badger-in-memory"
+	defraIMType    DatabaseType = "defra-memory-datastore"
+	badgerFileType DatabaseType = "badger-file-system"
+)
+
+var (
+	badgerInMemory bool
+	badgerFile     bool
+	inMemoryStore  bool
+	databaseDir    string
+)
+
+func init() {
+	// We use environment variables instead of flags because `go test ./...`
+	// throws for all packages that don't have the flag defined
+	badgerFile, _ = strconv.ParseBool(os.Getenv(fileBadgerEnvName))
+	badgerInMemory, _ = strconv.ParseBool(os.Getenv(memoryBadgerEnvName))
+	inMemoryStore, _ = strconv.ParseBool(os.Getenv(inMemoryEnvName))
+
+	if changeDetector.Enabled {
+		// Change detector only uses badger file db type.
+		badgerFile = true
+		badgerInMemory = false
+		inMemoryStore = false
+	} else if !badgerInMemory && !badgerFile && !inMemoryStore {
+		// Default is to test all but filesystem db types.
+		badgerFile = false
+		badgerInMemory = true
+		inMemoryStore = true
+	}
+}
+
+func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) {
+	opts := badgerds.Options{
+		Options: badger.DefaultOptions("").WithInMemory(true),
+	}
+	rootstore, err := badgerds.NewDatastore("", &opts)
+	if err != nil {
+		return nil, err
+	}
+	db, err := db.NewDB(ctx, rootstore, dbopts...)
+ if err != nil { + return nil, err + } + return db, nil +} + +func NewInMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { + db, err := db.NewDB(ctx, memory.NewDatastore(ctx), dbopts...) + if err != nil { + return nil, err + } + return db, nil +} + +func NewBadgerFileDB(ctx context.Context, t testing.TB, dbopts ...db.Option) (client.DB, string, error) { + var dbPath string + switch { + case databaseDir != "": + // restarting database + dbPath = databaseDir + + case changeDetector.Enabled: + // change detector + dbPath = changeDetector.DatabaseDir(t) + + default: + // default test case + dbPath = t.TempDir() + } + + opts := &badgerds.Options{ + Options: badger.DefaultOptions(dbPath), + } + rootstore, err := badgerds.NewDatastore(dbPath, opts) + if err != nil { + return nil, "", err + } + db, err := db.NewDB(ctx, rootstore, dbopts...) + if err != nil { + return nil, "", err + } + return db, dbPath, err +} + +// setupDatabase returns the database implementation for the current +// testing state. The database type on the test state is used to +// select the datastore implementation to use. +func setupDatabase(s *state) (impl client.DB, path string, err error) { + dbopts := []db.Option{ + db.WithUpdateEvents(), + db.WithLensPoolSize(lensPoolSize), + } + + switch s.dbt { + case badgerIMType: + impl, err = NewBadgerMemoryDB(s.ctx, dbopts...) + + case badgerFileType: + impl, path, err = NewBadgerFileDB(s.ctx, s.t, dbopts...) + + case defraIMType: + impl, err = NewInMemoryDB(s.ctx, dbopts...) + + default: + err = fmt.Errorf("invalid database type: %v", s.dbt) + } + + if err != nil { + return nil, "", err + } + return +} diff --git a/tests/integration/events/simple/with_update_test.go b/tests/integration/events/simple/with_update_test.go index 1119e61313..f496678e28 100644 --- a/tests/integration/events/simple/with_update_test.go +++ b/tests/integration/events/simple/with_update_test.go @@ -64,14 +64,14 @@ func TestEventsSimpleWithUpdate(t *testing.T) { ExpectedUpdates: []testUtils.ExpectedUpdate{ { DocKey: immutable.Some(docKey1), - Cid: immutable.Some("bafybeifugdzbm7y3eihxe7wbldyesxeh6s6m62ghvwipphtld547rfi4cu"), + Cid: immutable.Some("bafybeifwfw3g4q6tagffdwq4orrouoosdlsc5rb67q2uj7oplkq7ax5ysm"), }, { DocKey: immutable.Some(docKey2), }, { DocKey: immutable.Some(docKey1), - Cid: immutable.Some("bafybeihqwcasy4mnwcyrnd2n5hdkg745vyj3qidporvamrhfkjqxihsmqm"), + Cid: immutable.Some("bafybeihdhik6m5o7cxei7f7ie6lnnbwnjsn42ne6cxab6g7dgi7k2uiiu4"), }, }, } diff --git a/tests/integration/events/utils.go b/tests/integration/events/utils.go index 652b9fb411..c461ed5cc3 100644 --- a/tests/integration/events/utils.go +++ b/tests/integration/events/utils.go @@ -56,9 +56,9 @@ type TestCase struct { type ExpectedUpdate struct { DocKey immutable.Option[string] // The expected Cid, as a string (results in much more readable errors) - Cid immutable.Option[string] - SchemaID immutable.Option[string] - Priority immutable.Option[uint64] + Cid immutable.Option[string] + SchemaRoot immutable.Option[string] + Priority immutable.Option[uint64] } const eventTimeout = 100 * time.Millisecond @@ -98,7 +98,7 @@ func ExecuteRequestTestCase( assertIfExpected(t, expectedEvent.Cid, update.Cid.String()) assertIfExpected(t, expectedEvent.DocKey, update.DocKey) assertIfExpected(t, expectedEvent.Priority, update.Priority) - assertIfExpected(t, expectedEvent.SchemaID, update.SchemaID) + assertIfExpected(t, expectedEvent.SchemaRoot, update.SchemaRoot) indexOfNextExpectedUpdate++ case <-closeTestRoutineChan: diff 
--git a/tests/integration/explain.go b/tests/integration/explain.go index 44c457c0f8..da7a1106e2 100644 --- a/tests/integration/explain.go +++ b/tests/integration/explain.go @@ -125,7 +125,7 @@ func executeExplainRequest( } for _, node := range getNodes(action.NodeID, s.nodes) { - result := node.DB.ExecRequest(s.ctx, action.Request) + result := node.ExecRequest(s.ctx, action.Request) assertExplainRequestResults(s, &result.GQL, action) } } diff --git a/tests/integration/explain/execute/create_test.go b/tests/integration/explain/execute/create_test.go index e8ab75d48a..bd99ab39a4 100644 --- a/tests/integration/explain/execute/create_test.go +++ b/tests/integration/explain/execute/create_test.go @@ -48,6 +48,7 @@ func TestExecuteExplainMutationRequestWithCreate(t *testing.T) { "iterations": uint64(1), "docFetches": uint64(1), "fieldFetches": uint64(1), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/delete_test.go b/tests/integration/explain/execute/delete_test.go index 13411b5f5e..e924ce334c 100644 --- a/tests/integration/explain/execute/delete_test.go +++ b/tests/integration/explain/execute/delete_test.go @@ -51,6 +51,7 @@ func TestExecuteExplainMutationRequestWithDeleteUsingID(t *testing.T) { "iterations": uint64(2), "docFetches": uint64(1), "fieldFetches": uint64(1), + "indexFetches": uint64(0), }, }, }, @@ -99,6 +100,7 @@ func TestExecuteExplainMutationRequestWithDeleteUsingFilter(t *testing.T) { "iterations": uint64(2), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/group_test.go b/tests/integration/explain/execute/group_test.go index 3b7e42c845..9d4dc096f9 100644 --- a/tests/integration/explain/execute/group_test.go +++ b/tests/integration/explain/execute/group_test.go @@ -59,6 +59,7 @@ func TestExecuteExplainRequestWithGroup(t *testing.T) { "iterations": uint64(4), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/query_deleted_docs_test.go b/tests/integration/explain/execute/query_deleted_docs_test.go index 7642873b7f..cb1ebbcaa7 100644 --- a/tests/integration/explain/execute/query_deleted_docs_test.go +++ b/tests/integration/explain/execute/query_deleted_docs_test.go @@ -56,6 +56,7 @@ func TestExecuteExplainQueryDeletedDocs(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/scan_test.go b/tests/integration/explain/execute/scan_test.go index 85bd64229c..a68f175015 100644 --- a/tests/integration/explain/execute/scan_test.go +++ b/tests/integration/explain/execute/scan_test.go @@ -67,6 +67,7 @@ func TestExecuteExplainRequestWithAllDocumentsMatching(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -109,6 +110,7 @@ func TestExecuteExplainRequestWithNoDocuments(t *testing.T) { "iterations": uint64(1), "docFetches": uint64(0), "fieldFetches": uint64(0), + "indexFetches": uint64(0), }, }, }, @@ -172,6 +174,7 @@ func TestExecuteExplainRequestWithSomeDocumentsMatching(t *testing.T) { "iterations": uint64(2), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -235,6 +238,7 @@ func TestExecuteExplainRequestWithDocumentsButNoMatches(t *testing.T) { "iterations": uint64(1), "docFetches": uint64(2), "fieldFetches": uint64(4), + 
"indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/top_level_test.go b/tests/integration/explain/execute/top_level_test.go index 6afa9cbfb2..360c9a3d2c 100644 --- a/tests/integration/explain/execute/top_level_test.go +++ b/tests/integration/explain/execute/top_level_test.go @@ -70,6 +70,7 @@ func TestExecuteExplainTopLevelAverageRequest(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, @@ -153,6 +154,7 @@ func TestExecuteExplainTopLevelCountRequest(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -227,6 +229,7 @@ func TestExecuteExplainTopLevelSumRequest(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/type_join_test.go b/tests/integration/explain/execute/type_join_test.go index 8e26f423bb..eb1e187485 100644 --- a/tests/integration/explain/execute/type_join_test.go +++ b/tests/integration/explain/execute/type_join_test.go @@ -56,6 +56,13 @@ func TestExecuteExplainRequestWithAOneToOneJoin(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, @@ -115,6 +122,13 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, @@ -125,6 +139,13 @@ func TestExecuteExplainWithMultipleOneToOneJoins(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -187,6 +208,13 @@ func TestExecuteExplainWithTwoLevelDeepNestedJoins(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(2), + "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/update_test.go b/tests/integration/explain/execute/update_test.go index d9469e4b4e..a1fa92b091 100644 --- a/tests/integration/explain/execute/update_test.go +++ b/tests/integration/explain/execute/update_test.go @@ -59,6 +59,7 @@ func TestExecuteExplainMutationRequestWithUpdateUsingIDs(t *testing.T) { "iterations": uint64(6), "docFetches": uint64(4), "fieldFetches": uint64(8), + "indexFetches": uint64(0), }, }, }, @@ -116,6 +117,7 @@ func TestExecuteExplainMutationRequestWithUpdateUsingFilter(t *testing.T) { "iterations": uint64(4), "docFetches": uint64(4), "fieldFetches": uint64(6), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_average_test.go b/tests/integration/explain/execute/with_average_test.go index a3070e8c42..9e906c475d 100644 --- a/tests/integration/explain/execute/with_average_test.go +++ b/tests/integration/explain/execute/with_average_test.go @@ -56,6 +56,7 @@ func 
TestExecuteExplainAverageRequestOnArrayField(t *testing.T) { "iterations": uint64(4), "docFetches": uint64(3), "fieldFetches": uint64(5), + "indexFetches": uint64(0), }, }, }, @@ -116,6 +117,13 @@ func TestExplainExplainAverageRequestOnJoinedField(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(12), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_count_test.go b/tests/integration/explain/execute/with_count_test.go index 236d0bf8af..4a30b9f52a 100644 --- a/tests/integration/explain/execute/with_count_test.go +++ b/tests/integration/explain/execute/with_count_test.go @@ -57,6 +57,13 @@ func TestExecuteExplainRequestWithCountOnOneToManyRelation(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(14), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_limit_test.go b/tests/integration/explain/execute/with_limit_test.go index 9a65ec1ec3..88a1666ca3 100644 --- a/tests/integration/explain/execute/with_limit_test.go +++ b/tests/integration/explain/execute/with_limit_test.go @@ -51,6 +51,7 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParent(t *testing.T) { "iterations": uint64(2), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), }, }, }, @@ -107,6 +108,13 @@ func TestExecuteExplainRequestWithBothLimitAndOffsetOnParentAndLimitOnChild(t *t "iterations": uint64(2), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(2), + "docFetches": uint64(4), + "fieldFetches": uint64(6), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_order_test.go b/tests/integration/explain/execute/with_order_test.go index d5b7ccfaed..9155523b20 100644 --- a/tests/integration/explain/execute/with_order_test.go +++ b/tests/integration/explain/execute/with_order_test.go @@ -52,6 +52,7 @@ func TestExecuteExplainRequestWithOrderFieldOnParent(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), }, }, }, @@ -135,6 +136,7 @@ func TestExecuteExplainRequestWithMultiOrderFieldsOnParent(t *testing.T) { "iterations": uint64(5), "docFetches": uint64(4), "fieldFetches": uint64(8), + "indexFetches": uint64(0), }, }, }, @@ -189,6 +191,13 @@ func TestExecuteExplainRequestWithOrderFieldOnChild(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(9), + "indexFetches": uint64(0), }, }, }, @@ -246,6 +255,13 @@ func TestExecuteExplainRequestWithOrderFieldOnBothParentAndChild(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(4), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(9), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain/execute/with_sum_test.go b/tests/integration/explain/execute/with_sum_test.go index 
c6df56c2e0..c37e3d0309 100644 --- a/tests/integration/explain/execute/with_sum_test.go +++ b/tests/integration/explain/execute/with_sum_test.go @@ -52,6 +52,7 @@ func TestExecuteExplainRequestWithSumOfInlineArrayField(t *testing.T) { "iterations": uint64(4), "docFetches": uint64(3), "fieldFetches": uint64(5), + "indexFetches": uint64(0), }, }, }, @@ -110,6 +111,13 @@ func TestExecuteExplainRequestSumOfRelatedOneToManyField(t *testing.T) { "iterations": uint64(3), "docFetches": uint64(2), "fieldFetches": uint64(2), + "indexFetches": uint64(0), + }, + "subTypeScanNode": dataMap{ + "iterations": uint64(5), + "docFetches": uint64(6), + "fieldFetches": uint64(9), + "indexFetches": uint64(0), }, }, }, diff --git a/tests/integration/explain_result_asserter.go b/tests/integration/explain_result_asserter.go new file mode 100644 index 0000000000..30126d4fe4 --- /dev/null +++ b/tests/integration/explain_result_asserter.go @@ -0,0 +1,162 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package tests + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/sourcenetwork/immutable" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +const ( + iterationsProp = "iterations" + docFetchesProp = "docFetches" + fieldFetchesProp = "fieldFetches" + indexFetchesProp = "indexFetches" +) + +type dataMap = map[string]any + +// ExplainResultAsserter is a helper for asserting the result of an explain query. +// It allows asserting on a selected set of properties. 
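// Typical usage, as seen in the index tests later in this diff: construct with
// NewExplainAsserter, chain the With* setters for only the properties under
// test, and pass the result as the Asserter of a testUtils.Request. For example:
//
//	testUtils.Request{
//		Request:  makeExplainQuery(req),
//		Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2),
//	}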
+type ExplainResultAsserter struct {
+	iterations     immutable.Option[int]
+	docFetches     immutable.Option[int]
+	fieldFetches   immutable.Option[int]
+	indexFetches   immutable.Option[int]
+	filterMatches  immutable.Option[int]
+	sizeOfResults  immutable.Option[int]
+	planExecutions immutable.Option[uint64]
+}
+
+func readNumberProp(t *testing.T, val any, prop string) uint64 {
+	switch v := val.(type) {
+	case uint64:
+		return v
+	case json.Number:
+		n, err := v.Int64()
+		require.NoError(t, err, fmt.Sprintf("Expected %s property to be a uint64", prop))
+		return uint64(n)
+	default:
+		require.Fail(t, fmt.Sprintf("Unexpected type for %s property: %T", prop, val))
+	}
+	return 0
+}
+
+func (a *ExplainResultAsserter) Assert(t *testing.T, result []dataMap) {
+	require.Len(t, result, 1, "Expected len(result) = 1, got %d", len(result))
+	explainNode, ok := result[0]["explain"].(dataMap)
+	require.True(t, ok, "Expected explain node")
+	assert.Equal(t, explainNode["executionSuccess"], true, "Expected executionSuccess property")
+	if a.sizeOfResults.HasValue() {
+		actual := explainNode["sizeOfResult"]
+		assert.Equal(t, actual, a.sizeOfResults.Value(),
+			"Expected %d sizeOfResult, got %d", a.sizeOfResults.Value(), actual)
+	}
+	if a.planExecutions.HasValue() {
+		actual := explainNode["planExecutions"]
+		assert.Equal(t, actual, a.planExecutions.Value(),
+			"Expected %d planExecutions, got %d", a.planExecutions.Value(), actual)
+	}
+	selectTopNode, ok := explainNode["selectTopNode"].(dataMap)
+	require.True(t, ok, "Expected selectTopNode")
+	selectNode, ok := selectTopNode["selectNode"].(dataMap)
+	require.True(t, ok, "Expected selectNode")
+
+	if a.filterMatches.HasValue() {
+		filterMatches, hasFilterMatches := selectNode["filterMatches"]
+		require.True(t, hasFilterMatches, "Expected filterMatches property")
+		assert.Equal(t, filterMatches, uint64(a.filterMatches.Value()),
+			"Expected %d filterMatches, got %d", a.filterMatches, filterMatches)
+	}
+
+	scanNode, ok := selectNode["scanNode"].(dataMap)
+	subScanNode := map[string]any{}
+	if indexJoin, isJoin := selectNode["typeIndexJoin"].(dataMap); isJoin {
+		scanNode, ok = indexJoin["scanNode"].(dataMap)
+		subScanNode, _ = indexJoin["subTypeScanNode"].(dataMap)
+	}
+	require.True(t, ok, "Expected scanNode")
+
+	getScanNodesProp := func(prop string) uint64 {
+		val, hasProp := scanNode[prop]
+		require.True(t, hasProp, fmt.Sprintf("Expected %s property", prop))
+		actual := readNumberProp(t, val, prop)
+		if subScanNode[prop] != nil {
+			actual += readNumberProp(t, subScanNode[prop], "subTypeScanNode."+prop)
+		}
+		return actual
+	}
+
+	if a.iterations.HasValue() {
+		actual := getScanNodesProp(iterationsProp)
+		assert.Equal(t, actual, uint64(a.iterations.Value()),
+			"Expected %d iterations, got %d", a.iterations.Value(), actual)
+	}
+	if a.docFetches.HasValue() {
+		actual := getScanNodesProp(docFetchesProp)
+		assert.Equal(t, actual, uint64(a.docFetches.Value()),
+			"Expected %d docFetches, got %d", a.docFetches.Value(), actual)
+	}
+	if a.fieldFetches.HasValue() {
+		actual := getScanNodesProp(fieldFetchesProp)
+		assert.Equal(t, actual, uint64(a.fieldFetches.Value()),
+			"Expected %d fieldFetches, got %d", a.fieldFetches.Value(), actual)
+	}
+	if a.indexFetches.HasValue() {
+		actual := getScanNodesProp(indexFetchesProp)
+		assert.Equal(t, actual, uint64(a.indexFetches.Value()),
+			"Expected %d indexFetches, got %d", a.indexFetches.Value(), actual)
+	}
+}
+
+func (a *ExplainResultAsserter) WithIterations(iterations int) *ExplainResultAsserter {
+	a.iterations =
immutable.Some[int](iterations) + return a +} + +func (a *ExplainResultAsserter) WithDocFetches(docFetches int) *ExplainResultAsserter { + a.docFetches = immutable.Some[int](docFetches) + return a +} + +func (a *ExplainResultAsserter) WithFieldFetches(fieldFetches int) *ExplainResultAsserter { + a.fieldFetches = immutable.Some[int](fieldFetches) + return a +} + +func (a *ExplainResultAsserter) WithIndexFetches(indexFetches int) *ExplainResultAsserter { + a.indexFetches = immutable.Some[int](indexFetches) + return a +} + +func (a *ExplainResultAsserter) WithFilterMatches(filterMatches int) *ExplainResultAsserter { + a.filterMatches = immutable.Some[int](filterMatches) + return a +} + +func (a *ExplainResultAsserter) WithSizeOfResults(sizeOfResults int) *ExplainResultAsserter { + a.sizeOfResults = immutable.Some[int](sizeOfResults) + return a +} + +func (a *ExplainResultAsserter) WithPlanExecutions(planExecutions uint64) *ExplainResultAsserter { + a.planExecutions = immutable.Some[uint64](planExecutions) + return a +} + +func NewExplainAsserter() *ExplainResultAsserter { + return &ExplainResultAsserter{} +} diff --git a/tests/integration/index/create_drop_test.go b/tests/integration/index/create_drop_test.go index 43635116e7..e9f27bfe5e 100644 --- a/tests/integration/index/create_drop_test.go +++ b/tests/integration/index/create_drop_test.go @@ -52,7 +52,7 @@ func TestIndexDrop_ShouldNotHinderQuerying(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/index/create_test.go b/tests/integration/index/create_test.go index 15cbac530e..692b329079 100644 --- a/tests/integration/index/create_test.go +++ b/tests/integration/index/create_test.go @@ -49,7 +49,7 @@ func TestIndexCreateWithCollection_ShouldNotHinderQuerying(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -96,7 +96,7 @@ func TestIndexCreate_ShouldNotHinderQuerying(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/index/docs.go b/tests/integration/index/docs.go new file mode 100644 index 0000000000..505eadf98d --- /dev/null +++ b/tests/integration/index/docs.go @@ -0,0 +1,456 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
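// The With* setters above form a small builder over immutable.Option fields:
// an unset option means "do not assert this property". A self-contained sketch
// of the same pattern with a toy type (not the defradb asserter):
package main

import "fmt"

// toyAsserter mirrors the shape of ExplainResultAsserter: a nil pointer plays
// the role of an unset immutable.Option.
type toyAsserter struct {
	docFetches *int
}

func (a *toyAsserter) WithDocFetches(n int) *toyAsserter {
	a.docFetches = &n
	return a
}

func main() {
	a := (&toyAsserter{}).WithDocFetches(2)
	if a.docFetches != nil {
		fmt.Println("assert docFetches ==", *a.docFetches)
	}
}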
+ +package index + +type docsCollection struct { + colName string + docs []map[string]any +} + +func getUserDocs() docsCollection { + return docsCollection{ + colName: "User", + docs: []map[string]any{ + { + "name": "Shahzad", + "age": 20, + "verified": false, + "email": "shahzad@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "iPhone Xs", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.2, + "Chip": "Intel i3", + "RAM": 8, + "Storage": 512, + "OS": "iOS 12", + }, + }, + { + "model": "MacBook Pro", + "year": 2020, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.4, + "Chip": "Intel i5", + "RAM": 16, + "Storage": 2048, + "OS": "Yosemite", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 4635, + "city": "Montreal", + "country": "Canada", + "street": "Queen Mary Rd", + }, + }, + { + "name": "Bruno", + "age": 23, + "verified": true, + "email": "bruno@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{}, + }, + "address": map[string]any{ + "postalCode": 10001, + "city": "New York", + "country": "USA", + "street": "5th Ave", + }, + }, + { + "name": "Roy", + "age": 44, + "verified": true, + "email": "roy@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{}, + }, + "address": map[string]any{ + "postalCode": 90028, + "city": "Los Angeles", + "country": "USA", + "street": "Hollywood Blvd", + }, + }, + { + "name": "Fred", + "age": 28, + "verified": false, + "email": "fred@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "Samsung Galaxy S20", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.0, + "Chip": "AMD Athlon", + "RAM": 8, + "Storage": 256, + "OS": "Android 11", + }, + }, + { + "model": "Lenovo ThinkPad", + "year": 2020, + "type": "laptop", + "specs": map[string]any{ + "CPU": 1.9, + "Chip": "AMD Ryzen", + "RAM": 8, + "Storage": 1024, + "OS": "Windows 10", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 6512, + "city": "Montreal", + "country": "Canada", + "street": "Park Ave", + }, + }, + { + "name": "John", + "age": 30, + "verified": false, + "email": "john@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "Google Pixel 5", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.4, + "Chip": "Octa-core", + "RAM": 16, + "Storage": 512, + "OS": "Android 11", + }, + }, + { + "model": "Asus Vivobook", + "year": 2022, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.9, + "Chip": "Intel i7", + "RAM": 64, + "Storage": 2048, + "OS": "Windows 10", + }, + }, + { + "model": "Commodore 64", + "year": 1982, + "type": "computer", + "specs": map[string]any{ + "CPU": 0.1, + "Chip": "MOS 6510", + "RAM": 1, + "Storage": 1, + "OS": "Commodore BASIC 2.0", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 690, + "city": "Montreal", + "country": "Canada", + "street": "Notre-Dame St W", + }, + }, + { + "name": "Islam", + "age": 32, + "verified": false, + "email": "islam@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "iPhone 12s", + "year": 2018, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.1, + "Chip": "A11 Bionic", + "RAM": 8, + "Storage": 1024, + "OS": "iOS 14", + }, + }, + { + "model": "MacBook Pro", + "year": 2023, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.6, + "Chip": 
"Apple M2 Max", + "RAM": 32, + "Storage": 1024, + "OS": "Sonoma 14", + }, + }, + { + "model": "iPad Pro", + "year": 2020, + "type": "tablet", + "specs": map[string]any{ + "CPU": 2.1, + "Chip": "Intel i5", + "RAM": 8, + "Storage": 512, + "OS": "iOS 14", + }, + }, + { + "model": "Playstation 5", + "year": 2022, + "type": "game_console", + "specs": map[string]any{ + "CPU": 3.5, + "Chip": "AMD Zen 2", + "RAM": 16, + "Storage": 825, + "OS": "FreeBSD", + }, + }, + { + "model": "Nokia 7610", + "year": 2003, + "type": "phone", + "specs": map[string]any{ + "CPU": 1.8, + "Chip": "Cortex A710", + "RAM": 12, + "Storage": 2, + "OS": "Symbian 7.0", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 80804, + "city": "Munich", + "country": "Germany", + "street": "Leopold Str", + }, + }, + { + "name": "Andy", + "age": 33, + "verified": true, + "email": "andy@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "Xiaomi Phone", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 1.6, + "Chip": "AMD Octen", + "RAM": 8, + "Storage": 512, + "OS": "Android 11", + }, + }, + { + "model": "Alienware x16", + "year": 2018, + "type": "laptop", + "specs": map[string]any{ + "CPU": 3.2, + "Chip": "Intel i7", + "RAM": 64, + "Storage": 2048, + "OS": "Windows 9", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 101103, + "city": "London", + "country": "UK", + "street": "Baker St", + }, + }, + { + "name": "Addo", + "age": 42, + "verified": true, + "email": "addo@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "iPhone 10", + "year": 2021, + "type": "phone", + "specs": map[string]any{ + "CPU": 1.8, + "Chip": "Intel i3", + "RAM": 8, + "Storage": 256, + "OS": "iOS 12", + }, + }, + { + "model": "Acer Aspire 5", + "year": 2020, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.0, + "Chip": "Intel i5", + "RAM": 16, + "Storage": 512, + "OS": "Windows 10", + }, + }, + { + "model": "HyperX Headset", + "year": 2014, + "type": "headset", + "specs": map[string]any{ + "CPU": nil, + "Chip": nil, + "RAM": nil, + "Storage": nil, + "OS": nil, + }, + }, + { + "model": "Playstation 5", + "year": 2021, + "type": "game_console", + "specs": map[string]any{ + "CPU": 3.5, + "Chip": "AMD Zen 2", + "RAM": 16, + "Storage": 825, + "OS": "FreeBSD", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 403, + "city": "Ottawa", + "country": "Canada", + "street": "Bank St", + }, + }, + { + "name": "Keenan", + "age": 48, + "verified": true, + "email": "keenan@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: []map[string]any{ + { + "model": "iPhone 13", + "year": 2022, + "type": "phone", + "specs": map[string]any{ + "CPU": 2.3, + "Chip": "M1", + "RAM": 8, + "Storage": 1024, + "OS": "iOS 14", + }, + }, + { + "model": "MacBook Pro", + "year": 2017, + "type": "laptop", + "specs": map[string]any{ + "CPU": 2.0, + "Chip": "A11 Bionic", + "RAM": 16, + "Storage": 512, + "OS": "Ventura", + }, + }, + { + "model": "iPad Mini", + "year": 2015, + "type": "tablet", + "specs": map[string]any{ + "CPU": 1.9, + "Chip": "Intel i3", + "RAM": 8, + "Storage": 1024, + "OS": "iOS 12", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 1600, + "city": "San Francisco", + "country": "USA", + "street": "Market St", + }, + }, + { + "name": "Chris", + "age": 55, + "verified": true, + "email": "chris@gmail.com", + "devices": docsCollection{ + colName: "Device", + docs: 
[]map[string]any{ + { + "model": "Walkman", + "year": 2000, + "type": "phone", + "specs": map[string]any{ + "CPU": 1.8, + "Chip": "Cortex-A53 ", + "RAM": 8, + "Storage": 256, + "OS": "Android 11", + }, + }, + }, + }, + "address": map[string]any{ + "postalCode": 11680, + "city": "Toronto", + "country": "Canada", + "street": "Yonge St", + }, + }, + }, + } +} diff --git a/tests/integration/index/drop_test.go b/tests/integration/index/drop_test.go index ae2984854d..ab03e1df50 100644 --- a/tests/integration/index/drop_test.go +++ b/tests/integration/index/drop_test.go @@ -53,7 +53,7 @@ func TestIndexDrop_IfIndexDoesNotExist_ReturnError(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/index/query_performance_test.go b/tests/integration/index/query_performance_test.go new file mode 100644 index 0000000000..eec8a13f4b --- /dev/null +++ b/tests/integration/index/query_performance_test.go @@ -0,0 +1,86 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func generateDocsForCollection(colIndex, count int) []any { + result := make([]any, 0, count) + for i := 0; i < count; i++ { + result = append(result, testUtils.CreateDoc{ + CollectionID: colIndex, + Doc: fmt.Sprintf(`{ + "name": "name-%d", + "age": %d, + "email": "email%d@gmail.com" + }`, i, i%100, i), + }) + } + return result +} + +func TestQueryPerformance_Simple(t *testing.T) { + const benchReps = 10 + const numDocs = 500 + + test1 := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{Schema: ` + type User { + name: String + age: Int + email: String + } + `}, + testUtils.SchemaUpdate{ + Schema: ` + type IndexedUser { + name: String + age: Int @index + email: String + } + `, + }, + generateDocsForCollection(0, numDocs), + generateDocsForCollection(1, numDocs), + testUtils.Benchmark{ + Reps: benchReps, + BaseCase: testUtils.Request{Request: ` + query { + User(filter: {age: {_eq: 33}}) { + name + age + email + } + }`, + }, + OptimizedCase: testUtils.Request{Request: ` + query { + IndexedUser(filter: {age: {_eq: 33}}) { + name + age + email + } + }`, + }, + FocusClients: []testUtils.ClientType{testUtils.GoClientType}, + Factor: 5, + }, + }, + } + + testUtils.ExecuteTestCase(t, test1) +} diff --git a/tests/integration/index/query_with_index_combined_filter_test.go b/tests/integration/index/query_with_index_combined_filter_test.go new file mode 100644 index 0000000000..e5673d1ccf --- /dev/null +++ b/tests/integration/index/query_with_index_combined_filter_test.go @@ -0,0 +1,87 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
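// The tests below wrap each request with makeExplainQuery, a helper that is not
// shown in this diff. A plausible standalone sketch, assuming it rewrites a plain
// request into an execute-explain request via DefraDB's @explain directive (an
// assumption about the helper, not its actual definition):
package main

import (
	"fmt"
	"strings"
)

func makeExplainQuery(req string) string {
	// Naive rewrite of the first `query {` into an execute-explain request.
	return strings.Replace(req, "query {", "query @explain(type: execute) {", 1)
}

func main() {
	fmt.Println(makeExplainQuery(`query {
	User(filter: {name: {_eq: "Islam"}}) {
		name
	}
}`))
}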
+ +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryWithIndex_IfIndexFilterWithRegular_ShouldFilter(t *testing.T) { + req := `query { + User(filter: { + name: {_in: ["Fred", "Islam", "Addo"]}, + age: {_gt: 40} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Combination of a filter on regular and of an indexed field", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Addo"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(3).WithFieldFetches(6).WithIndexFetches(3), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_IfMultipleIndexFiltersWithRegular_ShouldFilter(t *testing.T) { + req := `query { + User(filter: { + name: {_like: "%a%"}, + age: {_gt: 30}, + email: {_like: "%m@gmail.com"} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Combination of a filter on regular and of 2 indexed fields", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int @index + email: String + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Islam"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(18), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/query_with_index_only_filter_test.go b/tests/integration/index/query_with_index_only_filter_test.go new file mode 100644 index 0000000000..098163b307 --- /dev/null +++ b/tests/integration/index/query_with_index_only_filter_test.go @@ -0,0 +1,534 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
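// createSchemaWithDocs, used by every test below, is likewise not shown in this
// diff. Presumably it combines a testUtils.SchemaUpdate action with CreateDoc
// actions seeded from getUserDocs in docs.go above; the equivalent setup can be
// written out explicitly with actions this diff already uses. A hedged sketch:
//
//	actions := []any{
//		testUtils.SchemaUpdate{Schema: `
//			type User {
//				name: String @index
//				age: Int
//			}
//		`},
//		testUtils.CreateDoc{
//			CollectionID: 0,
//			Doc:          `{"name": "Islam", "age": 32}`,
//		},
//	}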
+ +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryWithIndex_WithNonIndexedFields_ShouldFetchAllOfThem(t *testing.T) { + req := `query { + User(filter: {name: {_eq: "Islam"}}) { + name + age + } + }` + test := testUtils.TestCase{ + Description: "If there are non-indexed fields in the query, they should be fetched", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{{ + "name": "Islam", + "age": int64(32), + }}, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithEqualFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_eq: "Islam"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _eq filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Islam"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(1).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_IfSeveralDocsWithEqFilter_ShouldFetchAll(t *testing.T) { + req := `query { + User(filter: {name: {_eq: "Islam"}}) { + age + } + }` + test := testUtils.TestCase{ + Description: "If there are several docs matching _eq filter, they should be fetched", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Islam", + "age": 18 + }`, + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"age": int64(32)}, + {"age": int64(18)}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithGreaterThanFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_gt: 48}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _gt filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithGreaterOrEqualFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_ge: 48}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _ge filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Keenan"}, + {"name": "Chris"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + }, + } + + 
testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithLessThanFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_lt: 22}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _lt filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithLessOrEqualFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_le: 23}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _le filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Shahzad"}, + {"name": "Bruno"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithNotEqualFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_ne: "Islam"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _ne filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Roy"}, + {"name": "Addo"}, + {"name": "Andy"}, + {"name": "Fred"}, + {"name": "John"}, + {"name": "Bruno"}, + {"name": "Chris"}, + {"name": "Keenan"}, + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(9).WithFieldFetches(9).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithInFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_in: [20, 33]}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _in filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Shahzad"}, + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_IfSeveralDocsWithInFilter_ShouldFetchAll(t *testing.T) { + req := `query { + User(filter: {name: {_in: ["Islam"]}}) { + age + } + }` + test := testUtils.TestCase{ + Description: "If there are several docs matching _in filter, they should be fetched", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.CreateDoc{ + CollectionID: 0, + Doc: `{ + "name": "Islam", + "age": 18 + }`, + }, + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"age": int64(32)}, + {"age": int64(18)}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: 
testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(2), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithNotInFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {age: {_nin: [20, 23, 28, 33, 42, 55]}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _nin filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int @index + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "John"}, + {"name": "Islam"}, + {"name": "Roy"}, + {"name": "Keenan"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(4).WithFieldFetches(8).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithLikeFilter_ShouldFetch(t *testing.T) { + req1 := `query { + User(filter: {email: {_like: "a%"}}) { + name + } + }` + req2 := `query { + User(filter: {email: {_like: "%d@gmail.com"}}) { + name + } + }` + req3 := `query { + User(filter: {email: {_like: "%e%"}}) { + name + } + }` + req4 := `query { + User(filter: {email: {_like: "fred@gmail.com"}}) { + name + } + }` + req5 := `query { + User(filter: {email: {_like: "a%@gmail.com"}}) { + name + } + }` + req6 := `query { + User(filter: {email: {_like: "a%com%m"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _like filter", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + email: String @index + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Addo"}, + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "Fred"}, + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req3, + Results: []map[string]any{ + {"name": "Fred"}, + {"name": "Keenan"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req3), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req4, + Results: []map[string]any{ + {"name": "Fred"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req4), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(1).WithFieldFetches(2).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req5, + Results: []map[string]any{ + {"name": "Addo"}, + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req5), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(4).WithIndexFetches(10), + }, + testUtils.Request{ + Request: req6, + Results: []map[string]any{}, + }, + testUtils.Request{ + Request: makeExplainQuery(req6), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(0).WithFieldFetches(0).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndex_WithNotLikeFilter_ShouldFetch(t *testing.T) { + req := `query { + User(filter: {name: {_nlike: "%h%"}}) { + name + } + }` + test := testUtils.TestCase{ + Description: "Test index filtering with _nlike filter", + Actions: 
[]any{ + createSchemaWithDocs(` + type User { + name: String @index + age: Int + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Roy"}, + {"name": "Addo"}, + {"name": "Andy"}, + {"name": "Fred"}, + {"name": "Bruno"}, + {"name": "Islam"}, + {"name": "Keenan"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(7).WithFieldFetches(7).WithIndexFetches(10), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/query_with_relation_filter_test.go b/tests/integration/index/query_with_relation_filter_test.go new file mode 100644 index 0000000000..4a217e931c --- /dev/null +++ b/tests/integration/index/query_with_relation_filter_test.go @@ -0,0 +1,310 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestQueryWithIndexOnOneToManyRelation_IfFilterOnIndexedRelation_ShouldFilter(t *testing.T) { + req1 := `query { + User(filter: { + devices: {model: {_eq: "MacBook Pro"}} + }) { + name + } + }` + req2 := `query { + User(filter: { + devices: {model: {_eq: "iPhone 10"}} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed relation field in 1-N relation", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + devices: [Device] + } + + type Device { + model: String @index + owner: User + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Islam"}, + {"name": "Shahzad"}, + {"name": "Keenan"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(9).WithIndexFetches(3), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "Addo"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToOnesSecondaryRelation_IfFilterOnIndexedRelation_ShouldFilter(t *testing.T) { + req1 := `query { + User(filter: { + address: {city: {_eq: "Munich"}} + }) { + name + } + }` + req2 := `query { + User(filter: { + address: {city: {_eq: "Montreal"}} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed secondary relation field in 1-1 relation", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + address: Address + } + + type Address { + user: User + city: String @index + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Islam"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "Shahzad"}, + {"name": "Fred"}, + {"name": "John"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: 
testUtils.NewExplainAsserter().WithDocFetches(6).WithFieldFetches(9).WithIndexFetches(3), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedFieldOfRelation_ShouldFilter(t *testing.T) { + req1 := `query { + User(filter: { + address: {city: {_eq: "London"}} + }) { + name + } + }` + req2 := `query { + User(filter: { + address: {city: {_eq: "Montreal"}} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed field of primary relation in 1-1 relation", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + address: Address @primary + } + + type Address { + user: User + city: String @index + street: String + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(11).WithFieldFetches(12).WithIndexFetches(1), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + {"name": "John"}, + {"name": "Fred"}, + {"name": "Shahzad"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(15).WithFieldFetches(18).WithIndexFetches(3), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToOnePrimaryRelation_IfFilterOnIndexedRelationWhileIndexedForeignField_ShouldFilter(t *testing.T) { + req := `query { + User(filter: { + address: {city: {_eq: "London"}} + }) { + name + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed field of primary relation while having indexed foreign field in 1-1 relation", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + address: Address @primary @index + } + + type Address { + user: User + city: String @index + street: String + } + `), + testUtils.Request{ + Request: req, + Results: []map[string]any{ + {"name": "Andy"}, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(11).WithFieldFetches(12).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestQueryWithIndexOnOneToTwoRelation_IfFilterOnIndexedRelation_ShouldFilter(t *testing.T) { + req1 := `query { + User(filter: { + address: {city: {_eq: "Munich"}} + }) { + name + address { + city + } + } + }` + req2 := `query { + User(filter: { + devices: {model: {_eq: "Walkman"}} + }) { + name + devices { + model + } + } + }` + test := testUtils.TestCase{ + Description: "Filter on indexed relation field in 1-1 and 1-N relations", + Actions: []any{ + createSchemaWithDocs(` + type User { + name: String + age: Int + address: Address + devices: [Device] + } + + type Device { + model: String @index + owner: User + } + + type Address { + user: User + city: String @index + } + `), + testUtils.Request{ + Request: req1, + Results: []map[string]any{ + { + "name": "Islam", + "address": map[string]any{ + "city": "Munich", + }, + }, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req1), + Asserter: testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + }, + testUtils.Request{ + Request: req2, + Results: []map[string]any{ + { + "name": "Chris", + "devices": map[string]any{ + "model": "Walkman", + }, + }, + }, + }, + testUtils.Request{ + Request: makeExplainQuery(req2), + Asserter: 
testUtils.NewExplainAsserter().WithDocFetches(2).WithFieldFetches(3).WithIndexFetches(1), + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/index/utils.go b/tests/integration/index/utils.go new file mode 100644 index 0000000000..bb6cb89f14 --- /dev/null +++ b/tests/integration/index/utils.go @@ -0,0 +1,290 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package index + +import ( + "fmt" + "strings" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + "github.com/sourcenetwork/defradb/client/request" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +// createSchemaWithDocs returns a SchemaUpdate action followed by CreateDoc actions +// with the documents that match the schema. +// The schema is parsed to get the list of properties, and the docs +// are created with the same properties. +// This allows us to have only one large list of docs with predefined +// properties, and create schemas with different properties from it. +func createSchemaWithDocs(schema string) []any { + userDocs := getUserDocs() + resultActions := make([]any, 0, len(userDocs.docs)+1) + resultActions = append(resultActions, testUtils.SchemaUpdate{Schema: schema}) + parser := schemaParser{} + typeDefs := parser.Parse(schema) + generator := createDocGenerator{types: typeDefs} + for _, doc := range userDocs.docs { + actions := generator.GenerateDocs(doc, userDocs.colName) + resultActions = append(resultActions, actions...)
+ } + return resultActions +} + +type createDocGenerator struct { + types map[string]typeDefinition +} + +func createDocJSON(doc map[string]any, typeDef *typeDefinition) string { + sb := strings.Builder{} + for propName := range doc { + format := `"%s": %v` + if _, isStr := doc[propName].(string); isStr { + format = `"%s": "%v"` + } + if sb.Len() == 0 { + sb.WriteString("{\n") + } else { + sb.WriteString(",\n") + } + sb.WriteString(fmt.Sprintf(format, propName, doc[propName])) + } + sb.WriteString("\n}") + return sb.String() +} + +func toRequestedDoc(doc map[string]any, typeDef *typeDefinition) map[string]any { + result := make(map[string]any) + for _, prop := range typeDef.props { + if prop.isRelation { + continue + } + result[prop.name] = doc[prop.name] + } + for name, val := range doc { + if strings.HasSuffix(name, request.RelatedObjectID) { + result[name] = val + } + } + return result +} + +func (this *createDocGenerator) generatePrimary( + doc map[string]any, + typeDef *typeDefinition, +) (map[string]any, []any) { + result := []any{} + requested := toRequestedDoc(doc, typeDef) + for _, prop := range typeDef.props { + if prop.isRelation { + if _, hasProp := doc[prop.name]; hasProp { + if prop.isPrimary.Value() { + subType := this.types[prop.typeStr] + subDoc := toRequestedDoc(doc[prop.name].(map[string]any), &subType) + jsonSubDoc := createDocJSON(subDoc, &subType) + clientSubDoc, err := client.NewDocFromJSON([]byte(jsonSubDoc)) + if err != nil { + panic("Failed to create doc from JSON: " + err.Error()) + } + requested[prop.name+request.RelatedObjectID] = clientSubDoc.Key().String() + result = append(result, testUtils.CreateDoc{CollectionID: subType.index, Doc: jsonSubDoc}) + } + } + } + } + return requested, result +} + +func (this *createDocGenerator) GenerateDocs(doc map[string]any, typeName string) []any { + typeDef := this.types[typeName] + + requested, result := this.generatePrimary(doc, &typeDef) + docStr := createDocJSON(requested, &typeDef) + + result = append(result, testUtils.CreateDoc{CollectionID: typeDef.index, Doc: docStr}) + + var docKey string + for _, prop := range typeDef.props { + if prop.isRelation { + if _, hasProp := doc[prop.name]; hasProp { + if !prop.isPrimary.Value() { + if docKey == "" { + clientDoc, err := client.NewDocFromJSON([]byte(docStr)) + if err != nil { + panic("Failed to create doc from JSON: " + err.Error()) + } + docKey = clientDoc.Key().String() + } + actions := this.generateSecondaryDocs(doc, typeName, &prop, docKey) + result = append(result, actions...) + } + } + } + } + return result +} + +func (this *createDocGenerator) generateSecondaryDocs( + primaryDoc map[string]any, + primaryTypeName string, + relProp *propDefinition, + primaryDocKey string, +) []any { + result := []any{} + relTypeDef := this.types[relProp.typeStr] + primaryPropName := "" + for _, relDocProp := range relTypeDef.props { + if relDocProp.typeStr == primaryTypeName && relDocProp.isPrimary.Value() { + primaryPropName = relDocProp.name + request.RelatedObjectID + switch relVal := primaryDoc[relProp.name].(type) { + case docsCollection: + for _, relDoc := range relVal.docs { + relDoc[primaryPropName] = primaryDocKey + actions := this.GenerateDocs(relDoc, relTypeDef.name) + result = append(result, actions...) + } + case map[string]any: + relVal[primaryPropName] = primaryDocKey + actions := this.GenerateDocs(relVal, relTypeDef.name) + result = append(result, actions...) 
+ } + } + } + return result +} + +type propDefinition struct { + name string + typeStr string + isArray bool + isRelation bool + isPrimary immutable.Option[bool] +} + +type typeDefinition struct { + name string + index int + props map[string]propDefinition +} + +type schemaParser struct { + types map[string]typeDefinition + schemaLines []string + firstRelationType string + currentTypeDef typeDefinition + relationTypesMap map[string]map[string]string +} + +func (p *schemaParser) Parse(schema string) map[string]typeDefinition { + p.types = make(map[string]typeDefinition) + p.relationTypesMap = make(map[string]map[string]string) + p.schemaLines = strings.Split(schema, "\n") + p.findTypes() + + for _, line := range p.schemaLines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "type ") { + typeNameEndPos := strings.Index(line[5:], " ") + typeName := strings.TrimSpace(line[5 : 5+typeNameEndPos]) + p.currentTypeDef = p.types[typeName] + continue + } + if strings.HasPrefix(line, "}") { + p.types[p.currentTypeDef.name] = p.currentTypeDef + continue + } + pos := strings.Index(line, ":") + if pos != -1 { + p.defineProp(line, pos) + } + } + p.resolvePrimaryRelations() + return p.types +} + +func (p *schemaParser) findTypes() { + typeIndex := 0 + for _, line := range p.schemaLines { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, "type ") { + typeNameEndPos := strings.Index(line[5:], " ") + typeName := strings.TrimSpace(line[5 : 5+typeNameEndPos]) + p.types[typeName] = typeDefinition{name: typeName, index: typeIndex, props: make(map[string]propDefinition)} + typeIndex++ + } + } +} + +func (p *schemaParser) defineProp(line string, pos int) { + prop := propDefinition{name: line[:pos]} + prop.typeStr = strings.TrimSpace(line[pos+1:]) + typeEndPos := strings.Index(prop.typeStr, " ") + if typeEndPos != -1 { + prop.typeStr = prop.typeStr[:typeEndPos] + } + if prop.typeStr[0] == '[' { + prop.isArray = true + prop.typeStr = prop.typeStr[1 : len(prop.typeStr)-1] + } + if _, isRelation := p.types[prop.typeStr]; isRelation { + prop.isRelation = true + if prop.isArray { + prop.isPrimary = immutable.Some(false) + } else if strings.Contains(line[pos+len(prop.typeStr)+2:], "@primary") { + prop.isPrimary = immutable.Some(true) + } + relMap := p.relationTypesMap[prop.typeStr] + if relMap == nil { + relMap = make(map[string]string) + } + relMap[prop.name] = p.currentTypeDef.name + p.relationTypesMap[prop.typeStr] = relMap + if p.firstRelationType == "" { + p.firstRelationType = p.currentTypeDef.name + } + } + p.currentTypeDef.props[prop.name] = prop +} + +func (p *schemaParser) resolvePrimaryRelations() { + for typeName, relationProps := range p.relationTypesMap { + typeDef := p.types[typeName] + for _, prop := range typeDef.props { + for relPropName, relPropType := range relationProps { + if prop.typeStr == relPropType { + relatedTypeDef := p.types[relPropType] + relatedProp := relatedTypeDef.props[relPropName] + if !relatedProp.isPrimary.HasValue() { + relatedProp.isPrimary = immutable.Some(typeName == p.firstRelationType) + relatedTypeDef.props[relPropName] = relatedProp + p.types[relPropType] = relatedTypeDef + delete(p.relationTypesMap, relPropType) + } + if !prop.isPrimary.HasValue() { + val := typeName != p.firstRelationType + if relatedProp.isPrimary.HasValue() { + val = !relatedProp.isPrimary.Value() + } + prop.isPrimary = immutable.Some(val) + typeDef.props[prop.name] = prop + } + } + } + } + p.types[typeName] = typeDef + } +} + +func makeExplainQuery(req string) string { + return 
"query @explain(type: execute) " + req[6:] +} diff --git a/tests/integration/lens.go b/tests/integration/lens.go index 317864ab3e..e69437d87b 100644 --- a/tests/integration/lens.go +++ b/tests/integration/lens.go @@ -57,7 +57,7 @@ func configureMigration( action ConfigureMigration, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - db := getStore(s, node.DB, action.TransactionID, action.ExpectedError) + db := getStore(s, node, action.TransactionID, action.ExpectedError) err := db.SetMigration(s.ctx, action.LensConfig) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) @@ -71,7 +71,7 @@ func getMigrations( action GetMigrations, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - db := getStore(s, node.DB, action.TransactionID, "") + db := getStore(s, node, action.TransactionID, "") configs, err := db.LensRegistry().Config(s.ctx) require.NoError(s.t, err) diff --git a/tests/integration/mutation/create/simple_test.go b/tests/integration/mutation/create/simple_test.go index e1f4aa6d01..54f3de9536 100644 --- a/tests/integration/mutation/create/simple_test.go +++ b/tests/integration/mutation/create/simple_test.go @@ -85,7 +85,7 @@ func TestMutationCreate(t *testing.T) { { "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", "name": "John", - "age": uint64(27), + "age": int64(27), }, }, }, diff --git a/tests/integration/mutation/create/with_version_test.go b/tests/integration/mutation/create/with_version_test.go index 6c28e898f7..7cf879737e 100644 --- a/tests/integration/mutation/create/with_version_test.go +++ b/tests/integration/mutation/create/with_version_test.go @@ -39,7 +39,7 @@ func TestMutationCreate_ReturnsVersionCID(t *testing.T) { { "_version": []map[string]any{ { - "cid": "bafybeifugdzbm7y3eihxe7wbldyesxeh6s6m62ghvwipphtld547rfi4cu", + "cid": "bafybeifwfw3g4q6tagffdwq4orrouoosdlsc5rb67q2uj7oplkq7ax5ysm", }, }, }, diff --git a/tests/integration/mutation/delete/field_kinds/one_to_many/with_show_deleted_test.go b/tests/integration/mutation/delete/field_kinds/one_to_many/with_show_deleted_test.go index 6969ea3c6f..a30cf60050 100644 --- a/tests/integration/mutation/delete/field_kinds/one_to_many/with_show_deleted_test.go +++ b/tests/integration/mutation/delete/field_kinds/one_to_many/with_show_deleted_test.go @@ -102,7 +102,7 @@ func TestDeletionOfADocumentUsingSingleKeyWithShowDeletedDocumentQuery(t *testin { "_deleted": false, "name": "John", - "age": uint64(30), + "age": int64(30), "published": []map[string]any{ { "_deleted": true, diff --git a/tests/integration/mutation/delete/with_filter_test.go b/tests/integration/mutation/delete/with_filter_test.go index 70d4550be9..79bf04753d 100644 --- a/tests/integration/mutation/delete/with_filter_test.go +++ b/tests/integration/mutation/delete/with_filter_test.go @@ -88,10 +88,10 @@ func TestMutationDeletion_WithFilterMatchingMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "age": uint64(2), + "age": int64(2), }, { - "age": uint64(1), + "age": int64(1), }, }, }, diff --git a/tests/integration/mutation/mix/with_txn_test.go b/tests/integration/mutation/mix/with_txn_test.go index 3b12513a23..8a88db606a 100644 --- a/tests/integration/mutation/mix/with_txn_test.go +++ b/tests/integration/mutation/mix/with_txn_test.go @@ -109,7 +109,7 @@ func TestMutationWithTxnDoesNotDeletesUserGivenDifferentTransactions(t *testing. 
{ "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", "name": "John", - "age": uint64(27), + "age": int64(27), }, }, }, @@ -174,7 +174,7 @@ func TestMutationWithTxnDoesUpdateUserGivenSameTransactions(t *testing.T) { { "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", "name": "John", - "age": uint64(28), + "age": int64(28), }, }, }, @@ -215,7 +215,7 @@ func TestMutationWithTxnDoesNotUpdateUserGivenDifferentTransactions(t *testing.T { "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", "name": "John", - "age": uint64(28), + "age": int64(28), }, }, }, @@ -232,7 +232,7 @@ func TestMutationWithTxnDoesNotUpdateUserGivenDifferentTransactions(t *testing.T { "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", "name": "John", - "age": uint64(27), + "age": int64(27), }, }, }, @@ -274,7 +274,7 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) { "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", "name": "John", - "age": uint64(28), + "age": int64(28), }, }, }, @@ -291,7 +291,7 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) { "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", "name": "John", - "age": uint64(29), + "age": int64(29), }, }, }, @@ -315,7 +315,7 @@ func TestMutationWithTxnDoesNotAllowUpdateInSecondTransactionUser(t *testing.T) { "_key": "bae-88b63198-7d38-5714-a9ff-21ba46374fd1", "name": "John", - "age": uint64(28), + "age": int64(28), }, }, }, diff --git a/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go b/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go new file mode 100644 index 0000000000..16225f4ab3 --- /dev/null +++ b/tests/integration/mutation/update/field_kinds/one_to_one/with_self_ref_test.go @@ -0,0 +1,191 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package one_to_one + +import ( + "fmt" + "testing" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestMutationUpdateOneToOne_SelfReferencingFromPrimary(t *testing.T) { + user1ID := "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + + test := testUtils.TestCase{ + Description: "One to one update mutation, self referencing from primary", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + boss: User @primary + underling: User + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Fred" + }`, + }, + testUtils.UpdateDoc{ + DocID: 1, + Doc: fmt.Sprintf( + `{ + "boss_id": "%s" + }`, + user1ID, + ), + }, + testUtils.Request{ + Request: ` + query { + User { + name + boss { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + "boss": map[string]any{ + "name": "John", + }, + }, + { + "name": "John", + "boss": nil, + }, + }, + }, + testUtils.Request{ + Request: ` + query { + User { + name + underling { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + "underling": nil, + }, + { + "name": "John", + "underling": map[string]any{ + "name": "Fred", + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestMutationUpdateOneToOne_SelfReferencingFromSecondary(t *testing.T) { + user1ID := "bae-decf6467-4c7c-50d7-b09d-0a7097ef6bad" + + test := testUtils.TestCase{ + Description: "One to one update mutation, self referencing from secondary", + + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type User { + name: String + boss: User + underling: User @primary + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "Fred" + }`, + }, + testUtils.UpdateDoc{ + DocID: 1, + Doc: fmt.Sprintf( + `{ + "boss_id": "%s" + }`, + user1ID, + ), + }, + testUtils.Request{ + Request: ` + query { + User { + name + boss { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + "boss": map[string]any{ + "name": "John", + }, + }, + { + "name": "John", + "boss": nil, + }, + }, + }, + testUtils.Request{ + Request: ` + query { + User { + name + underling { + name + } + } + }`, + Results: []map[string]any{ + { + "name": "Fred", + "underling": nil, + }, + { + "name": "John", + "underling": map[string]any{ + "name": "Fred", + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/net/order/tcp_test.go b/tests/integration/net/order/tcp_test.go index 118a69fd85..a66856be3e 100644 --- a/tests/integration/net/order/tcp_test.go +++ b/tests/integration/net/order/tcp_test.go @@ -56,12 +56,12 @@ func TestP2PWithSingleDocumentUpdatePerNode(t *testing.T) { Results: map[int]map[int]map[string]any{ 0: { 0: { - "Age": uint64(45), + "Age": int64(45), }, }, 1: { 0: { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -119,12 +119,12 @@ func TestP2PWithMultipleDocumentUpdatesPerNode(t *testing.T) { Results: map[int]map[int]map[string]any{ 0: { 0: { - "Age": uint64(47), + "Age": int64(47), }, }, 1: { 0: { - "Age": uint64(62), + "Age": int64(62), }, }, }, @@ -157,7 +157,7 @@ func TestP2FullPReplicator(t *testing.T) { ReplicatorResult: map[int]map[string]map[string]any{ 1: { doc.Key().String(): { - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/net/order/utils.go b/tests/integration/net/order/utils.go index 83d01743b9..84f41f98d6 100644 --- a/tests/integration/net/order/utils.go +++ 
b/tests/integration/net/order/utils.go @@ -16,7 +16,6 @@ import ( "strings" "testing" - ma "github.com/multiformats/go-multiaddr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -26,7 +25,6 @@ import ( "github.com/sourcenetwork/defradb/errors" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" - netpb "github.com/sourcenetwork/defradb/net/pb" netutils "github.com/sourcenetwork/defradb/net/utils" testutils "github.com/sourcenetwork/defradb/tests/integration" ) @@ -112,14 +110,11 @@ func setupDefraNode(t *testing.T, cfg *config.Config, seeds []string) (*net.Node return nil, nil, errors.Wrap(fmt.Sprintf("failed to parse bootstrap peers %v", cfg.Net.Peers), err) } log.Info(ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) - n.Boostrap(addrs) + n.Bootstrap(addrs) } if err := n.Start(); err != nil { - closeErr := n.Close() - if closeErr != nil { - return nil, nil, errors.Wrap(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) - } + n.Close() return nil, nil, errors.Wrap("unable to start P2P listeners", err) } @@ -208,9 +203,10 @@ func executeTestCase(t *testing.T, test P2PTestCase) { log.Info(ctx, "cannot set a peer that hasn't been started. Skipping to next peer") continue } + peerInfo := nodes[p].PeerInfo() peerAddresses = append( peerAddresses, - fmt.Sprintf("%s/p2p/%s", test.NodeConfig[p].Net.P2PAddress, nodes[p].PeerID()), + fmt.Sprintf("%s/p2p/%s", peerInfo.Addrs[0], peerInfo.ID), ) } cfg.Net.Peers = strings.Join(peerAddresses, ",") @@ -262,7 +258,7 @@ func executeTestCase(t *testing.T, test P2PTestCase) { continue } log.Info(ctx, fmt.Sprintf("Waiting for node %d to sync with peer %d", n2, n)) - err := p.WaitForPushLogByPeerEvent(nodes[n].PeerID()) + err := p.WaitForPushLogByPeerEvent(nodes[n].PeerInfo().ID) require.NoError(t, err) log.Info(ctx, fmt.Sprintf("Node %d synced", n2)) } @@ -301,16 +297,9 @@ func executeTestCase(t *testing.T, test P2PTestCase) { for i, n := range nodes { if reps, ok := test.NodeReplicators[i]; ok { for _, r := range reps { - addr, err := ma.NewMultiaddr( - fmt.Sprintf("%s/p2p/%s", test.NodeConfig[r].Net.P2PAddress, nodes[r].PeerID()), - ) - require.NoError(t, err) - _, err = n.Peer.SetReplicator( - ctx, - &netpb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) + err := n.Peer.SetReplicator(ctx, client.Replicator{ + Info: nodes[r].PeerInfo(), + }) require.NoError(t, err) } } @@ -349,19 +338,14 @@ func executeTestCase(t *testing.T, test P2PTestCase) { // clean up for _, n := range nodes { - if err := n.Close(); err != nil { - log.Info(ctx, "node not closing as expected", logging.NewKV("Error", err.Error())) - } - n.DB.Close(ctx) + n.Close() + n.DB.Close() } } -const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" - func randomNetworkingConfig() *config.Config { cfg := config.DefaultConfig() - cfg.Net.P2PAddress = randomMultiaddr - cfg.Net.RPCAddress = "0.0.0.0:0" - cfg.Net.TCPAddress = randomMultiaddr + cfg.Net.P2PAddress = "/ip4/127.0.0.1/tcp/0" + cfg.Net.RelayEnabled = false return cfg } diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go b/tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go index d3795ee0e1..26dbfdf151 100644 --- a/tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go +++ b/tests/integration/net/state/simple/peer/subscribe/with_add_remove_test.go @@ -170,7 +170,7 @@ func TestP2PSubscribeAddSingleAndRemoveErroneous(t *testing.T) { 
testUtils.UnsubscribeToCollection{ NodeID: 1, CollectionIDs: []int{0, testUtils.NonExistentCollectionID}, - ExpectedError: "datastore: key not found", + ExpectedError: "collection not found", }, testUtils.CreateDoc{ NodeID: immutable.Some(0), diff --git a/tests/integration/net/state/simple/peer/subscribe/with_add_test.go b/tests/integration/net/state/simple/peer/subscribe/with_add_test.go index 8cd294e98a..04a1a3d57c 100644 --- a/tests/integration/net/state/simple/peer/subscribe/with_add_test.go +++ b/tests/integration/net/state/simple/peer/subscribe/with_add_test.go @@ -199,7 +199,7 @@ func TestP2PSubscribeAddSingleErroneousCollectionID(t *testing.T) { testUtils.SubscribeToCollection{ NodeID: 1, CollectionIDs: []int{testUtils.NonExistentCollectionID}, - ExpectedError: "datastore: key not found", + ExpectedError: "collection not found", }, testUtils.CreateDoc{ NodeID: immutable.Some(0), @@ -243,7 +243,7 @@ func TestP2PSubscribeAddValidAndErroneousCollectionID(t *testing.T) { testUtils.SubscribeToCollection{ NodeID: 1, CollectionIDs: []int{0, testUtils.NonExistentCollectionID}, - ExpectedError: "datastore: key not found", + ExpectedError: "collection not found", }, testUtils.CreateDoc{ NodeID: immutable.Some(0), @@ -292,7 +292,7 @@ func TestP2PSubscribeAddValidThenErroneousCollectionID(t *testing.T) { testUtils.SubscribeToCollection{ NodeID: 1, CollectionIDs: []int{testUtils.NonExistentCollectionID}, - ExpectedError: "datastore: key not found", + ExpectedError: "collection not found", }, testUtils.CreateDoc{ NodeID: immutable.Some(0), diff --git a/tests/integration/net/state/simple/peer/with_create_add_field_test.go b/tests/integration/net/state/simple/peer/with_create_add_field_test.go index 034340b92e..31861d6498 100644 --- a/tests/integration/net/state/simple/peer/with_create_add_field_test.go +++ b/tests/integration/net/state/simple/peer/with_create_add_field_test.go @@ -35,7 +35,7 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing.T) { NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -108,7 +108,7 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToNewerSchemaVersion(t *testing.T) { NodeID: immutable.Some(1), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -164,7 +164,7 @@ func TestP2PPeerCreateWithNewFieldSyncsDocsToUpdatedSchemaVersion(t *testing.T) // Patch the schema on all nodes Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, diff --git a/tests/integration/net/state/simple/peer/with_create_test.go b/tests/integration/net/state/simple/peer/with_create_test.go index 8833167aa2..a6c095024c 100644 --- a/tests/integration/net/state/simple/peer/with_create_test.go +++ b/tests/integration/net/state/simple/peer/with_create_test.go @@ -59,10 +59,10 @@ func TestP2PCreateDoesNotSync(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, { - "Age": uint64(300), + "Age": int64(300), }, }, }, @@ -75,7 +75,7 @@ func TestP2PCreateDoesNotSync(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(300), + "Age": int64(300), }, // Peer sync should not sync new 
documents to nodes }, @@ -147,13 +147,13 @@ func TestP2PCreateWithP2PCollection(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, { - "Age": uint64(30), + "Age": int64(30), }, { - "Age": uint64(28), + "Age": int64(28), }, // Peer sync should not sync new documents to nodes that is not subscribed // to the P2P collection. @@ -168,16 +168,16 @@ func TestP2PCreateWithP2PCollection(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, { - "Age": uint64(31), + "Age": int64(31), }, { - "Age": uint64(30), + "Age": int64(30), }, { - "Age": uint64(28), + "Age": int64(28), }, }, }, diff --git a/tests/integration/net/state/simple/peer/with_delete_test.go b/tests/integration/net/state/simple/peer/with_delete_test.go index c838e8ab02..49a0b98c41 100644 --- a/tests/integration/net/state/simple/peer/with_delete_test.go +++ b/tests/integration/net/state/simple/peer/with_delete_test.go @@ -68,7 +68,7 @@ func TestP2PWithMultipleDocumentsSingleDelete(t *testing.T) { { "_deleted": false, "Name": "Andy", - "Age": uint64(74), + "Age": int64(74), }, }, }, @@ -126,12 +126,12 @@ func TestP2PWithMultipleDocumentsSingleDeleteWithShowDeleted(t *testing.T) { { "_deleted": false, "Name": "Andy", - "Age": uint64(74), + "Age": int64(74), }, { "_deleted": true, "Name": "John", - "Age": uint64(43), + "Age": int64(43), }, }, }, @@ -198,12 +198,12 @@ func TestP2PWithMultipleDocumentsWithSingleUpdateBeforeConnectSingleDeleteWithSh { "_deleted": false, "Name": "Andy", - "Age": uint64(74), + "Age": int64(74), }, { "_deleted": true, "Name": "John", - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -279,12 +279,12 @@ func TestP2PWithMultipleDocumentsWithMultipleUpdatesBeforeConnectSingleDeleteWit { "_deleted": false, "Name": "Andy", - "Age": uint64(74), + "Age": int64(74), }, { "_deleted": true, "Name": "John", - "Age": uint64(62), + "Age": int64(62), }, }, }, @@ -370,12 +370,12 @@ func TestP2PWithMultipleDocumentsWithUpdateAndDeleteBeforeConnectSingleDeleteWit { "_deleted": false, "Name": "Andy", - "Age": uint64(74), + "Age": int64(74), }, { "_deleted": true, "Name": "John", - "Age": uint64(62), + "Age": int64(62), }, }, }, @@ -394,12 +394,12 @@ func TestP2PWithMultipleDocumentsWithUpdateAndDeleteBeforeConnectSingleDeleteWit { "_deleted": false, "Name": "Andy", - "Age": uint64(74), + "Age": int64(74), }, { "_deleted": false, "Name": "John", - "Age": uint64(66), + "Age": int64(66), }, }, }, diff --git a/tests/integration/net/state/simple/peer/with_update_add_field_test.go b/tests/integration/net/state/simple/peer/with_update_add_field_test.go index 89ab3a99b0..88e86a75a3 100644 --- a/tests/integration/net/state/simple/peer/with_update_add_field_test.go +++ b/tests/integration/net/state/simple/peer/with_update_add_field_test.go @@ -48,7 +48,7 @@ func TestP2PPeerUpdateWithNewFieldSyncsDocsToOlderSchemaVersionMultistep(t *test NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -133,7 +133,7 @@ func TestP2PPeerUpdateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing.T) { NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, diff --git a/tests/integration/net/state/simple/peer/with_update_restart_test.go 
b/tests/integration/net/state/simple/peer/with_update_restart_test.go index 42fc00e4bd..2f68870be4 100644 --- a/tests/integration/net/state/simple/peer/with_update_restart_test.go +++ b/tests/integration/net/state/simple/peer/with_update_restart_test.go @@ -59,7 +59,7 @@ func TestP2PWithSingleDocumentSingleUpdateFromChildAndRestart(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, diff --git a/tests/integration/net/state/simple/peer/with_update_test.go b/tests/integration/net/state/simple/peer/with_update_test.go index fe122239ce..5a04cd6034 100644 --- a/tests/integration/net/state/simple/peer/with_update_test.go +++ b/tests/integration/net/state/simple/peer/with_update_test.go @@ -60,7 +60,7 @@ func TestP2PWithSingleDocumentSingleUpdateFromChild(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -112,7 +112,7 @@ func TestP2PWithSingleDocumentSingleUpdateFromParent(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -170,7 +170,7 @@ func TestP2PWithSingleDocumentUpdatePerNode(t *testing.T) { }`, Results: []map[string]any{ { - "Age": testUtils.AnyOf{uint64(45), uint64(60)}, + "Age": testUtils.AnyOf{int64(45), int64(60)}, }, }, }, @@ -223,7 +223,7 @@ func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncToNonPeerNode(t *testing.T) }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -236,7 +236,7 @@ func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncToNonPeerNode(t *testing.T) }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -250,7 +250,7 @@ func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncToNonPeerNode(t *testing.T) }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -305,7 +305,7 @@ func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncFromUnmappedNode(t *testing }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -319,7 +319,7 @@ func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncFromUnmappedNode(t *testing }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -332,7 +332,7 @@ func TestP2PWithSingleDocumentSingleUpdateDoesNotSyncFromUnmappedNode(t *testing }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -412,7 +412,7 @@ func TestP2PWithMultipleDocumentUpdatesPerNode(t *testing.T) { }`, Results: []map[string]any{ { - "Age": testUtils.AnyOf{uint64(47), uint64(62)}, + "Age": testUtils.AnyOf{int64(47), int64(62)}, }, }, }, @@ -475,10 +475,10 @@ func TestP2PWithSingleDocumentSingleUpdateFromChildWithP2PCollection(t *testing. 
}`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -591,10 +591,10 @@ func TestP2PWithMultipleDocumentUpdatesPerNodeWithP2PCollection(t *testing.T) { }`, Results: []map[string]any{ { - "Age": testUtils.AnyOf{uint64(47), uint64(62)}, + "Age": testUtils.AnyOf{int64(47), int64(62)}, }, { - "Age": uint64(60), + "Age": int64(60), }, }, }, diff --git a/tests/integration/net/state/simple/peer_replicator/with_create_test.go b/tests/integration/net/state/simple/peer_replicator/with_create_test.go index 72aae77a8c..c7b1bf0e8e 100644 --- a/tests/integration/net/state/simple/peer_replicator/with_create_test.go +++ b/tests/integration/net/state/simple/peer_replicator/with_create_test.go @@ -63,10 +63,10 @@ func TestP2PPeerReplicatorWithCreate(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, { - "Age": uint64(3000), + "Age": int64(3000), }, }, }, @@ -79,7 +79,7 @@ func TestP2PPeerReplicatorWithCreate(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -92,10 +92,10 @@ func TestP2PPeerReplicatorWithCreate(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, { - "Age": uint64(3000), + "Age": int64(3000), }, }, }, diff --git a/tests/integration/net/state/simple/peer_replicator/with_delete_test.go b/tests/integration/net/state/simple/peer_replicator/with_delete_test.go index ba72c30610..1d73e53da7 100644 --- a/tests/integration/net/state/simple/peer_replicator/with_delete_test.go +++ b/tests/integration/net/state/simple/peer_replicator/with_delete_test.go @@ -64,7 +64,7 @@ func TestP2PPeerReplicatorWithDeleteShowDeleted(t *testing.T) { { "_deleted": true, "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go b/tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go index 731f86b661..b908e5ae38 100644 --- a/tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go +++ b/tests/integration/net/state/simple/peer_replicator/with_update_restart_test.go @@ -67,7 +67,7 @@ func TestP2PPeerReplicatorWithUpdateAndRestart(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, diff --git a/tests/integration/net/state/simple/peer_replicator/with_update_test.go b/tests/integration/net/state/simple/peer_replicator/with_update_test.go index 8e2a86998e..7e7e2682b5 100644 --- a/tests/integration/net/state/simple/peer_replicator/with_update_test.go +++ b/tests/integration/net/state/simple/peer_replicator/with_update_test.go @@ -62,7 +62,7 @@ func TestP2PPeerReplicatorWithUpdate(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, diff --git a/tests/integration/net/state/simple/replicator/with_create_add_field_test.go b/tests/integration/net/state/simple/replicator/with_create_add_field_test.go index 3e36b5c847..f73c731666 100644 --- a/tests/integration/net/state/simple/replicator/with_create_add_field_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_add_field_test.go @@ -35,7 +35,7 @@ func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToOlderSchemaVersion(t NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", 
"Kind": 11} } ] `, }, @@ -87,7 +87,7 @@ func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToNewerSchemaVersion(t NodeID: immutable.Some(1), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -137,7 +137,7 @@ func TestP2POneToOneReplicatorCreateWithNewFieldSyncsDocsToUpdatedSchemaVersion( // Patch the schema on all nodes Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, diff --git a/tests/integration/net/state/simple/replicator/with_create_restart_test.go b/tests/integration/net/state/simple/replicator/with_create_restart_test.go index 7dc5746724..92ad213dd0 100644 --- a/tests/integration/net/state/simple/replicator/with_create_restart_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_restart_test.go @@ -53,7 +53,7 @@ func TestP2POneToOneReplicatorWithRestart(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/net/state/simple/replicator/with_create_test.go b/tests/integration/net/state/simple/replicator/with_create_test.go index e1e75a25c0..f877457c9c 100644 --- a/tests/integration/net/state/simple/replicator/with_create_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_test.go @@ -12,6 +12,7 @@ package replicator import ( "testing" + "time" "github.com/sourcenetwork/immutable" @@ -53,7 +54,7 @@ func TestP2POneToOneReplicator(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -98,7 +99,7 @@ func TestP2POneToOneReplicatorDoesNotSyncExisting(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -150,6 +151,55 @@ func TestP2POneToOneReplicatorDoesNotSyncFromTargetToSource(t *testing.T) { testUtils.ExecuteTestCase(t, test) } +func TestP2POneToOneReplicatorDoesNotSyncFromDeletedReplicator(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.RandomNetworkingConfig(), + testUtils.RandomNetworkingConfig(), + testUtils.SchemaUpdate{ + Schema: ` + type Users { + Name: String + Age: Int + } + `, + }, + testUtils.ConfigureReplicator{ + SourceNodeID: 0, + TargetNodeID: 1, + }, + testUtils.DeleteReplicator{ + SourceNodeID: 0, + TargetNodeID: 1, + }, + testUtils.CreateDoc{ + // Create John on the first (source) node only + NodeID: immutable.Some(0), + Doc: `{ + "Name": "John", + "Age": 21 + }`, + }, + testUtils.WaitForSync{ + // No documents should be synced + ExpectedTimeout: 100 * time.Millisecond, + }, + testUtils.Request{ + // Assert that John has not been synced to the second (target) node + NodeID: immutable.Some(1), + Request: `query { + Users { + Age + } + }`, + Results: []map[string]any{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + func TestP2POneToManyReplicator(t *testing.T) { test := testUtils.TestCase{ Actions: []any{ @@ -189,7 +239,7 @@ func TestP2POneToManyReplicator(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -236,7 +286,7 @@ func TestP2POneToOneOfManyReplicator(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -249,7 +299,7 @@ func TestP2POneToOneOfManyReplicator(t *testing.T) { }`, Results: []map[string]any{ { - "Age": 
uint64(21), + "Age": int64(21), }, }, }, @@ -311,10 +361,10 @@ func TestP2POneToOneReplicatorManyDocs(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, { - "Age": uint64(22), + "Age": int64(22), }, }, }, @@ -371,10 +421,10 @@ func TestP2POneToManyReplicatorManyDocs(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, { - "Age": uint64(22), + "Age": int64(22), }, }, }, @@ -438,11 +488,11 @@ func TestP2POneToOneReplicatorOrderIndependent(t *testing.T) { Results: []map[string]any{ { "_key": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", - "age": uint64(21), + "age": int64(21), "name": "John", "_version": []map[string]any{ { - "schemaVersionId": "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", + "schemaVersionId": "bafkreiggbvwwiqmzid4qnklwwdyu7mwhbbjy3ejss3x7uw7zxw6ivmmj6u", }, }, }, @@ -502,7 +552,7 @@ func TestP2POneToOneReplicatorOrderIndependentDirectCreate(t *testing.T) { "_key": "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", "_version": []map[string]any{ { - "schemaVersionId": "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", + "schemaVersionId": "bafkreiggbvwwiqmzid4qnklwwdyu7mwhbbjy3ejss3x7uw7zxw6ivmmj6u", }, }, }, diff --git a/tests/integration/net/state/simple/replicator/with_create_update_test.go b/tests/integration/net/state/simple/replicator/with_create_update_test.go index dd3612055d..a8b259d46e 100644 --- a/tests/integration/net/state/simple/replicator/with_create_update_test.go +++ b/tests/integration/net/state/simple/replicator/with_create_update_test.go @@ -61,7 +61,7 @@ func TestP2POneToOneReplicatorWithCreateWithUpdate(t *testing.T) { }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -118,7 +118,7 @@ func TestP2POneToOneReplicatorWithCreateWithUpdateOnRecipientNode(t *testing.T) }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -141,10 +141,6 @@ func TestP2POneToOneReplicatorDoesNotUpdateDocExistingOnlyOnTarget(t *testing.T) } `, }, - testUtils.ConfigureReplicator{ - SourceNodeID: 0, - TargetNodeID: 1, - }, testUtils.CreateDoc{ // This document is created in all nodes Doc: `{ @@ -152,6 +148,13 @@ func TestP2POneToOneReplicatorDoesNotUpdateDocExistingOnlyOnTarget(t *testing.T) "Age": 21 }`, }, + testUtils.ConfigureReplicator{ + // Replication must happen after creating documents + // on both nodes, or a race condition can occur + // on the second node when creating the document + SourceNodeID: 0, + TargetNodeID: 1, + }, testUtils.CreateDoc{ // This document is created in the second node (target) only NodeID: immutable.Some(1), diff --git a/tests/integration/net/state/simple/replicator/with_delete_test.go b/tests/integration/net/state/simple/replicator/with_delete_test.go index 48235e1b0a..89a715d356 100644 --- a/tests/integration/net/state/simple/replicator/with_delete_test.go +++ b/tests/integration/net/state/simple/replicator/with_delete_test.go @@ -62,7 +62,7 @@ func TestP2POneToOneReplicatorDeletesDocCreatedBeforeReplicatorConfig(t *testing { "_deleted": true, "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -116,7 +116,7 @@ func TestP2POneToOneReplicatorDeletesDocCreatedBeforeReplicatorConfigWithNodesIn { "_deleted": true, "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/net/state/simple/replicator/with_update_add_field_test.go b/tests/integration/net/state/simple/replicator/with_update_add_field_test.go index 
22786fcaad..52f67d324d 100644 --- a/tests/integration/net/state/simple/replicator/with_update_add_field_test.go +++ b/tests/integration/net/state/simple/replicator/with_update_add_field_test.go @@ -44,7 +44,7 @@ func TestP2PReplicatorUpdateWithNewFieldSyncsDocsToOlderSchemaVersionMultistep(t NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, @@ -125,7 +125,7 @@ func TestP2PReplicatorUpdateWithNewFieldSyncsDocsToOlderSchemaVersion(t *testing NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "Email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "Email", "Kind": 11} } ] `, }, diff --git a/tests/integration/net/state/simple/replicator/with_update_test.go b/tests/integration/net/state/simple/replicator/with_update_test.go index 370160c3b8..60891f54e3 100644 --- a/tests/integration/net/state/simple/replicator/with_update_test.go +++ b/tests/integration/net/state/simple/replicator/with_update_test.go @@ -60,7 +60,7 @@ func TestP2POneToOneReplicatorUpdatesDocCreatedBeforeReplicatorConfig(t *testing }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, @@ -112,7 +112,7 @@ func TestP2POneToOneReplicatorUpdatesDocCreatedBeforeReplicatorConfigWithNodesIn }`, Results: []map[string]any{ { - "Age": uint64(60), + "Age": int64(60), }, }, }, diff --git a/tests/integration/p2p.go b/tests/integration/p2p.go index 311a088c86..2e4fb86b5d 100644 --- a/tests/integration/p2p.go +++ b/tests/integration/p2p.go @@ -11,16 +11,14 @@ package tests import ( - "fmt" "time" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/logging" - "github.com/sourcenetwork/defradb/net" - pb "github.com/sourcenetwork/defradb/net/pb" - netutils "github.com/sourcenetwork/defradb/net/utils" + "github.com/sourcenetwork/defradb/tests/clients" - ma "github.com/multiformats/go-multiaddr" + "github.com/libp2p/go-libp2p/core/peer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -59,9 +57,21 @@ type ConfigureReplicator struct { TargetNodeID int } -// NonExistentCollectionID can be used to represent a non-existent collection ID, it will be substituted -// for a non-existent collection ID when used in actions that support this. -const NonExistentCollectionID int = -1 +// DeleteReplicator deletes a directional replicator relationship between two nodes. +type DeleteReplicator struct { + // SourceNodeID is the node ID (index) of the node from which the replicator should be deleted. + SourceNodeID int + + // TargetNodeID is the node ID (index) of the node that the replicator being deleted targets. + TargetNodeID int +} + +const ( + // NonExistentCollectionID can be used to represent a non-existent collection ID, it will be substituted + // for a non-existent collection ID when used in actions that support this. + NonExistentCollectionID int = -1 + NonExistentCollectionSchemaRoot string = "NonExistentCollectionID" +) // SubscribeToCollection sets up a subscription on the given node to the given collection. // @@ -119,7 +129,10 @@ type GetAllP2PCollections struct { // // For example you will likely wish to `WaitForSync` after creating a document in node 0 before querying // node 1 to see if it has been replicated.
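// For instance (a sketch composed from the DeleteReplicator test case added in
// with_create_test.go above), the two new actions are used together like this:
// delete the replicator before creating a document, then assert that nothing
// syncs by expecting the wait to time out rather than complete.
//
//	testUtils.DeleteReplicator{
//		SourceNodeID: 0,
//		TargetNodeID: 1,
//	},
//	testUtils.WaitForSync{
//		// No sync event should arrive; the wait must time out.
//		ExpectedTimeout: 100 * time.Millisecond,
//	},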
-type WaitForSync struct{} +type WaitForSync struct { + // ExpectedTimeout is the duration to wait when expecting a timeout to occur. + ExpectedTimeout time.Duration +} // connectPeers connects two existing, started, nodes as peers. It returns a channel // that will receive an empty struct upon sync completion of all expected peer-sync events. @@ -134,15 +147,10 @@ time.Sleep(100 * time.Millisecond) sourceNode := s.nodes[cfg.SourceNodeID] targetNode := s.nodes[cfg.TargetNodeID] - targetAddress := s.nodeAddresses[cfg.TargetNodeID] - log.Info(s.ctx, "Parsing bootstrap peers", logging.NewKV("Peers", targetAddress)) - addrs, err := netutils.ParsePeers([]string{targetAddress}) - if err != nil { - s.t.Fatal(fmt.Sprintf("failed to parse bootstrap peers %v", targetAddress), err) - } + addrs := []peer.AddrInfo{targetNode.PeerInfo()} log.Info(s.ctx, "Bootstrapping with peers", logging.NewKV("Addresses", addrs)) - sourceNode.Boostrap(addrs) + sourceNode.Bootstrap(addrs) // Bootstrap triggers a bunch of async stuff for which we have no good way of waiting on. It must be // allowed to complete before document creation begins or it will not even try and sync it. So for now, we @@ -155,12 +163,16 @@ func setupPeerWaitSync( s *state, startIndex int, cfg ConnectPeers, - sourceNode *net.Node, - targetNode *net.Node, + sourceNode clients.Client, + targetNode clients.Client, ) { - nodeCollections := map[int][]int{} sourceToTargetEvents := []int{0} targetToSourceEvents := []int{0} + + sourcePeerInfo := sourceNode.PeerInfo() + targetPeerInfo := targetNode.PeerInfo() + + nodeCollections := map[int][]int{} waitIndex := 0 for i := startIndex; i < len(s.testCase.Actions); i++ { switch action := s.testCase.Actions[i].(type) { @@ -245,11 +257,11 @@ func setupPeerWaitSync( ready <- struct{}{} for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ { for i := 0; i < targetToSourceEvents[waitIndex]; i++ { - err := sourceNode.WaitForPushLogByPeerEvent(targetNode.PeerID()) + err := sourceNode.WaitForPushLogByPeerEvent(targetPeerInfo.ID) require.NoError(s.t, err) } for i := 0; i < sourceToTargetEvents[waitIndex]; i++ { - err := targetNode.WaitForPushLogByPeerEvent(sourceNode.PeerID()) + err := targetNode.WaitForPushLogByPeerEvent(sourcePeerInfo.ID) require.NoError(s.t, err) } nodeSynced <- struct{}{} @@ -291,30 +303,40 @@ func configureReplicator( time.Sleep(100 * time.Millisecond) sourceNode := s.nodes[cfg.SourceNodeID] targetNode := s.nodes[cfg.TargetNodeID] - targetAddress := s.nodeAddresses[cfg.TargetNodeID] - addr, err := ma.NewMultiaddr(targetAddress) + err := sourceNode.SetReplicator(s.ctx, client.Replicator{ + Info: targetNode.PeerInfo(), + }) require.NoError(s.t, err) + setupReplicatorWaitSync(s, 0, cfg, sourceNode, targetNode) +} - _, err = sourceNode.Peer.SetReplicator( - s.ctx, - &pb.SetReplicatorRequest{ - Addr: addr.Bytes(), - }, - ) +func deleteReplicator( + s *state, + cfg DeleteReplicator, +) { + sourceNode := s.nodes[cfg.SourceNodeID] + targetNode := s.nodes[cfg.TargetNodeID] + + err := sourceNode.DeleteReplicator(s.ctx, client.Replicator{ + Info: targetNode.PeerInfo(), + }) require.NoError(s.t, err) } func setupReplicatorWaitSync( s *state, startIndex int, cfg ConfigureReplicator, - sourceNode *net.Node, - targetNode *net.Node, + sourceNode clients.Client, + targetNode clients.Client, ) { sourceToTargetEvents := []int{0} targetToSourceEvents := []int{0} + + sourcePeerInfo := sourceNode.PeerInfo() + targetPeerInfo 
:= targetNode.PeerInfo() + docIDsSyncedToSource := map[int]struct{}{} waitIndex := 0 currentDocID := 0 @@ -366,11 +388,11 @@ func setupReplicatorWaitSync( ready <- struct{}{} for waitIndex := 0; waitIndex < len(sourceToTargetEvents); waitIndex++ { for i := 0; i < targetToSourceEvents[waitIndex]; i++ { - err := sourceNode.WaitForPushLogByPeerEvent(targetNode.PeerID()) + err := sourceNode.WaitForPushLogByPeerEvent(targetPeerInfo.ID) require.NoError(s.t, err) } for i := 0; i < sourceToTargetEvents[waitIndex]; i++ { - err := targetNode.WaitForPushLogByPeerEvent(sourceNode.PeerID()) + err := targetNode.WaitForPushLogByPeerEvent(sourcePeerInfo.ID) require.NoError(s.t, err) } nodeSynced <- struct{}{} @@ -391,23 +413,18 @@ func subscribeToCollection( ) { n := s.nodes[action.NodeID] - schemaIDs := []string{} + schemaRoots := []string{} for _, collectionIndex := range action.CollectionIDs { if collectionIndex == NonExistentCollectionID { - schemaIDs = append(schemaIDs, "NonExistentCollectionID") + schemaRoots = append(schemaRoots, NonExistentCollectionSchemaRoot) continue } col := s.collections[action.NodeID][collectionIndex] - schemaIDs = append(schemaIDs, col.SchemaID()) + schemaRoots = append(schemaRoots, col.SchemaRoot()) } - _, err := n.Peer.AddP2PCollections( - s.ctx, - &pb.AddP2PCollectionsRequest{ - Collections: schemaIDs, - }, - ) + err := n.AddP2PCollections(s.ctx, schemaRoots) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -426,23 +443,18 @@ func unsubscribeToCollection( ) { n := s.nodes[action.NodeID] - schemaIDs := []string{} + schemaRoots := []string{} for _, collectionIndex := range action.CollectionIDs { if collectionIndex == NonExistentCollectionID { - schemaIDs = append(schemaIDs, "NonExistentCollectionID") + schemaRoots = append(schemaRoots, NonExistentCollectionSchemaRoot) continue } col := s.collections[action.NodeID][collectionIndex] - schemaIDs = append(schemaIDs, col.SchemaID()) + schemaRoots = append(schemaRoots, col.SchemaRoot()) } - _, err := n.Peer.RemoveP2PCollections( - s.ctx, - &pb.RemoveP2PCollectionsRequest{ - Collections: schemaIDs, - }, - ) + err := n.RemoveP2PCollections(s.ctx, schemaRoots) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -460,26 +472,17 @@ func getAllP2PCollections( s *state, action GetAllP2PCollections, ) { - expectedCollections := []*pb.GetAllP2PCollectionsReply_Collection{} + expectedCollections := []string{} for _, collectionIndex := range action.ExpectedCollectionIDs { col := s.collections[action.NodeID][collectionIndex] - expectedCollections = append( - expectedCollections, - &pb.GetAllP2PCollectionsReply_Collection{ - Id: col.SchemaID(), - Name: col.Name(), - }, - ) + expectedCollections = append(expectedCollections, col.SchemaRoot()) } n := s.nodes[action.NodeID] - cols, err := n.Peer.GetAllP2PCollections( - s.ctx, - &pb.GetAllP2PCollectionsRequest{}, - ) + cols, err := n.GetAllP2PCollections(s.ctx) require.NoError(s.t, err) - assert.Equal(s.t, expectedCollections, cols.Collections) + assert.Equal(s.t, expectedCollections, cols) } // waitForSync waits for all given wait channels to receive an item signaling completion. 
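// In outline, the ExpectedTimeout handling added in the hunk below inverts the
// assertion depending on whether a timeout was expected (a condensed sketch of
// the change that follows):
//
//	timeout := action.ExpectedTimeout
//	if timeout == 0 {
//		timeout = subscriptionTimeout * 10
//	}
//	select {
//	case <-resultsChan:
//		// Receiving a sync event is a failure if a timeout was expected.
//		assert.True(s.t, action.ExpectedTimeout == 0, "unexpected document has been synced")
//	case <-time.After(timeout):
//		// Timing out is a failure unless a timeout was expected.
//		assert.True(s.t, action.ExpectedTimeout != 0, "timeout occurred while waiting for data stream")
//	}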
@@ -490,26 +493,40 @@ func waitForSync( s *state, action WaitForSync, ) { + var timeout time.Duration + if action.ExpectedTimeout != 0 { + timeout = action.ExpectedTimeout + } else { + timeout = subscriptionTimeout * 10 + } + for _, resultsChan := range s.syncChans { select { case <-resultsChan: - continue + assert.True( + s.t, + action.ExpectedTimeout == 0, + "unexpected document has been synced", + s.testCase.Description, + ) // a safety in case the stream hangs - we don't want the tests to run forever. - case <-time.After(subscriptionTimeout * 10): - assert.Fail(s.t, "timeout occurred while waiting for data stream", s.testCase.Description) + case <-time.After(timeout): + assert.True( + s.t, + action.ExpectedTimeout != 0, + "timeout occurred while waiting for data stream", + s.testCase.Description, + ) } } } -const randomMultiaddr = "/ip4/0.0.0.0/tcp/0" - func RandomNetworkingConfig() ConfigureNode { return func() config.Config { cfg := config.DefaultConfig() - cfg.Net.P2PAddress = randomMultiaddr - cfg.Net.RPCAddress = "0.0.0.0:0" - cfg.Net.TCPAddress = randomMultiaddr + cfg.Net.P2PAddress = "/ip4/127.0.0.1/tcp/0" + cfg.Net.RelayEnabled = false return *cfg } } diff --git a/tests/integration/query/commits/simple_test.go b/tests/integration/query/commits/simple_test.go index ffd558f2ee..1ee63bcedd 100644 --- a/tests/integration/query/commits/simple_test.go +++ b/tests/integration/query/commits/simple_test.go @@ -36,13 +36,13 @@ func TestQueryCommits(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, @@ -79,22 +79,22 @@ func TestQueryCommitsMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiepxkcirsd56mtrnzv3nbuqsfjeu77jk7dw5pmxvzmye2agyhzfta", + "cid": "bafybeiftg4c3aioppm2mn5f7wuqynbezricqdzpvspkd74jm7lq2jrst6m", }, { - "cid": "bafybeidjkzqj3yub3k4sulnaa444zuuy7yo2ku4gl4qxvmcopgoynafyae", + "cid": "bafybeielma57bnbv5oizjsv7szhu6jq45rxfcdof62opaygyyqp2j7qd5e", }, { - "cid": "bafybeiexzvyjil7s5cxicetgu4kriiuqspbzm7hpd353q5kmqbpqky26hq", + "cid": "bafybeigvf4bcuc53dphwniloxt3kqqoersoghdprxsjkb6xqq7wup34usy", }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, @@ -125,16 +125,16 @@ func TestQueryCommitsWithSchemaVersionIdField(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", - "schemaVersionId": "bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", + "schemaVersionId": "bafkreictcre4pylafzzoh5lpgbetdodunz4r6pz3ormdzzpsz2lqtp4v34", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", - "schemaVersionId": 
"bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", + "schemaVersionId": "bafkreictcre4pylafzzoh5lpgbetdodunz4r6pz3ormdzzpsz2lqtp4v34", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", - "schemaVersionId": "bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", + "schemaVersionId": "bafkreictcre4pylafzzoh5lpgbetdodunz4r6pz3ormdzzpsz2lqtp4v34", }, }, }, diff --git a/tests/integration/query/commits/with_cid_test.go b/tests/integration/query/commits/with_cid_test.go index 6d8c30d73e..46d767620e 100644 --- a/tests/integration/query/commits/with_cid_test.go +++ b/tests/integration/query/commits/with_cid_test.go @@ -38,14 +38,14 @@ func TestQueryCommitsWithCid(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4" + cid: "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, @@ -71,14 +71,14 @@ func TestQueryCommitsWithCidForFieldCommit(t *testing.T) { testUtils.Request{ Request: `query { commits( - cid: "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq" + cid: "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, diff --git a/tests/integration/query/commits/with_depth_test.go b/tests/integration/query/commits/with_depth_test.go index 12acde76e5..f3bc9bc35c 100644 --- a/tests/integration/query/commits/with_depth_test.go +++ b/tests/integration/query/commits/with_depth_test.go @@ -36,13 +36,13 @@ func TestQueryCommitsWithDepth1(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, @@ -81,16 +81,16 @@ func TestQueryCommitsWithDepth1WithUpdate(t *testing.T) { Results: []map[string]any{ { // "Age" field head - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", "height": int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "height": int64(1), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", "height": int64(2), }, }, @@ -137,27 +137,27 @@ func TestQueryCommitsWithDepth2WithUpdate(t *testing.T) { Results: []map[string]any{ { // Composite head - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeietneua73vfrkvfefw5rmju7yhee6rywychdbls5xqmqtqmzfckzq", "height": int64(3), }, { // Composite head -1 - "cid": 
"bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", "height": int64(2), }, { // "Name" field head (unchanged from create) - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "height": int64(1), }, { // "Age" field head - "cid": "bafybeiegusf5ypa7htxwa6u4fvne3lqq2jafe4fxllh4lo6iw4xdsn4yyq", + "cid": "bafybeidwgrk2xyu25pmwvpkfs4hnswtgej6gopkf26jrgm6lpbofa3rs3e", "height": int64(3), }, { // "Age" field head -1 - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", "height": int64(2), }, }, @@ -195,22 +195,22 @@ func TestQueryCommitsWithDepth1AndMultipleDocs(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihncdw7dmswtccv7sluutfb36wunsunxjtt6i3tjgsdrum23nff3y", + "cid": "bafybeihayvvwwsjvd3yefenc4ubebriluyg4rdzxmizrhefk4agotcqlp4", }, { - "cid": "bafybeibcs77pp5dy7qnph5fm3n6bhw74opbj2b6t66dfau37agoqvhypfm", + "cid": "bafybeiezcqlaqvozdw3ogdf2dxukwrf5m3xydd7lyy6ylcqycx5uqqepfm", }, { - "cid": "bafybeidy7d44vt5aizivwq6oejqejkog7hl43ckjc35yoidw5qv5kngfma", + "cid": "bafybeicr2lalkqj6weqcafm32posw22hjmybwohau57eswg5a442qilc2q", }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_cid_test.go b/tests/integration/query/commits/with_dockey_cid_test.go index c6c9c3e7e0..be1c3bf580 100644 --- a/tests/integration/query/commits/with_dockey_cid_test.go +++ b/tests/integration/query/commits/with_dockey_cid_test.go @@ -104,14 +104,14 @@ func TestQueryCommitsWithDockeyAndCid(t *testing.T) { Request: ` { commits( dockey: "bae-f54b9689-e06e-5e3a-89b3-f3aee8e64ca7", - cid: "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe" + cid: "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm" ) { cid } }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_count_test.go b/tests/integration/query/commits/with_dockey_count_test.go index dc64c6847b..7a61fa5fef 100644 --- a/tests/integration/query/commits/with_dockey_count_test.go +++ b/tests/integration/query/commits/with_dockey_count_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDockeyAndLinkCount(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "_count": 0, }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "_count": 0, }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "_count": 2, }, }, diff --git a/tests/integration/query/commits/with_dockey_field_test.go 
b/tests/integration/query/commits/with_dockey_field_test.go index 77857a23fe..b588300fb6 100644 --- a/tests/integration/query/commits/with_dockey_field_test.go +++ b/tests/integration/query/commits/with_dockey_field_test.go @@ -118,7 +118,7 @@ func TestQueryCommitsWithDockeyAndFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", }, }, }, @@ -150,7 +150,7 @@ func TestQueryCommitsWithDockeyAndCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_limit_offset_test.go b/tests/integration/query/commits/with_dockey_limit_offset_test.go index 3ec10284e3..a8c6665bca 100644 --- a/tests/integration/query/commits/with_dockey_limit_offset_test.go +++ b/tests/integration/query/commits/with_dockey_limit_offset_test.go @@ -57,10 +57,10 @@ func TestQueryCommitsWithDockeyAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeietneua73vfrkvfefw5rmju7yhee6rywychdbls5xqmqtqmzfckzq", }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_limit_test.go b/tests/integration/query/commits/with_dockey_limit_test.go index 4b87bfa307..b9f8e51f8b 100644 --- a/tests/integration/query/commits/with_dockey_limit_test.go +++ b/tests/integration/query/commits/with_dockey_limit_test.go @@ -50,10 +50,10 @@ func TestQueryCommitsWithDockeyAndLimit(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeietneua73vfrkvfefw5rmju7yhee6rywychdbls5xqmqtqmzfckzq", }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", }, }, }, diff --git a/tests/integration/query/commits/with_dockey_order_limit_offset_test.go b/tests/integration/query/commits/with_dockey_order_limit_offset_test.go index 1b1a8fe885..195e2b3a8e 100644 --- a/tests/integration/query/commits/with_dockey_order_limit_offset_test.go +++ b/tests/integration/query/commits/with_dockey_order_limit_offset_test.go @@ -58,11 +58,11 @@ func TestQueryCommitsWithDockeyAndOrderAndLimitAndOffset(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", "height": int64(2), }, { - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeietneua73vfrkvfefw5rmju7yhee6rywychdbls5xqmqtqmzfckzq", "height": int64(3), }, }, diff --git a/tests/integration/query/commits/with_dockey_order_test.go b/tests/integration/query/commits/with_dockey_order_test.go index c2d1aac620..2b4e8f6156 100644 --- a/tests/integration/query/commits/with_dockey_order_test.go +++ b/tests/integration/query/commits/with_dockey_order_test.go @@ -44,23 +44,23 @@ func TestQueryCommitsWithDockeyAndOrderHeightDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": 
"bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", "height": int64(2), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", "height": int64(2), }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "height": int64(1), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "height": int64(1), }, }, @@ -99,23 +99,23 @@ func TestQueryCommitsWithDockeyAndOrderHeightAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "height": int64(1), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "height": int64(1), }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", "height": int64(2), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", "height": int64(2), }, }, @@ -154,24 +154,24 @@ func TestQueryCommitsWithDockeyAndOrderCidDesc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", - "height": int64(1), + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", + "height": int64(2), }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", - "height": int64(1), + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", + "height": int64(2), }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", - "height": int64(2), + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", + "height": int64(1), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", - "height": int64(2), + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", + "height": int64(1), }, }, }, @@ -209,23 +209,23 @@ func TestQueryCommitsWithDockeyAndOrderCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", - "height": int64(2), + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", + "height": int64(1), }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", - "height": int64(2), + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", + "height": int64(1), }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", - "height": int64(1), + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", + "height": int64(2), }, { - "cid": 
"bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", - "height": int64(1), + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", + "height": int64(2), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "height": int64(1), }, }, @@ -278,39 +278,39 @@ func TestQueryCommitsWithDockeyAndOrderAndMultiUpdatesCidAsc(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "height": int64(1), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "height": int64(1), }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", "height": int64(2), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", "height": int64(2), }, { - "cid": "bafybeihccn3utqsaxzsh6i7dlnd45rutcg7fbsogfw4vvigii7laedslqe", + "cid": "bafybeietneua73vfrkvfefw5rmju7yhee6rywychdbls5xqmqtqmzfckzq", "height": int64(3), }, { - "cid": "bafybeiegusf5ypa7htxwa6u4fvne3lqq2jafe4fxllh4lo6iw4xdsn4yyq", + "cid": "bafybeidwgrk2xyu25pmwvpkfs4hnswtgej6gopkf26jrgm6lpbofa3rs3e", "height": int64(3), }, { - "cid": "bafybeigicex7hqzhzltm3adsx34rnzhp7lgubtrusxukk54whosmtfun7y", + "cid": "bafybeiahvakoy5joy563em7hlzvqcarxqdp2nin4gnxythoj4fqjh7umzu", "height": int64(4), }, { - "cid": "bafybeihv6d4fo7q5pziriv4rz3loq6unr3fegdonjcuyw5stano5r7dm4i", + "cid": "bafybeighft6vokgntjvpirwdt233xizmnhxtawiqeahwypxv7u26dwseoe", "height": int64(4), }, }, diff --git a/tests/integration/query/commits/with_dockey_test.go b/tests/integration/query/commits/with_dockey_test.go index 8e21007f3e..9dde4bc41a 100644 --- a/tests/integration/query/commits/with_dockey_test.go +++ b/tests/integration/query/commits/with_dockey_test.go @@ -62,13 +62,13 @@ func TestQueryCommitsWithDockey(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, @@ -102,22 +102,22 @@ func TestQueryCommitsWithDockeyAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "links": []map[string]any{}, }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "links": []map[string]any{}, }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": 
"bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "name": "age", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "name": "name", }, }, @@ -158,23 +158,23 @@ func TestQueryCommitsWithDockeyAndUpdate(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", "height": int64(2), }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "height": int64(1), }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "height": int64(1), }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", "height": int64(2), }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "height": int64(1), }, }, @@ -219,44 +219,44 @@ func TestQueryCommitsWithDockeyAndUpdateAndLinks(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "name": "_head", }, }, }, { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "links": []map[string]any{}, }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "links": []map[string]any{}, }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", "links": []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "name": "_head", }, { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", "name": "age", }, }, }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "name": "age", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "name": "name", }, }, diff --git a/tests/integration/query/commits/with_dockey_typename_test.go b/tests/integration/query/commits/with_dockey_typename_test.go index 106d0ff326..f8573785f4 100644 --- a/tests/integration/query/commits/with_dockey_typename_test.go +++ b/tests/integration/query/commits/with_dockey_typename_test.go @@ -37,15 +37,15 @@ func TestQueryCommitsWithDockeyWithTypeName(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": 
"bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "__typename": "Commit", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "__typename": "Commit", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "__typename": "Commit", }, }, diff --git a/tests/integration/query/commits/with_field_test.go b/tests/integration/query/commits/with_field_test.go index e355db1710..008dc871d4 100644 --- a/tests/integration/query/commits/with_field_test.go +++ b/tests/integration/query/commits/with_field_test.go @@ -66,7 +66,7 @@ func TestQueryCommitsWithFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", }, }, }, @@ -98,7 +98,7 @@ func TestQueryCommitsWithCompositeFieldId(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, @@ -131,8 +131,8 @@ func TestQueryCommitsWithCompositeFieldIdWithReturnedSchemaVersionId(t *testing. }`, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", - "schemaVersionId": "bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", + "schemaVersionId": "bafkreictcre4pylafzzoh5lpgbetdodunz4r6pz3ormdzzpsz2lqtp4v34", }, }, }, diff --git a/tests/integration/query/commits/with_group_test.go b/tests/integration/query/commits/with_group_test.go index 86822aac06..d031d70540 100644 --- a/tests/integration/query/commits/with_group_test.go +++ b/tests/integration/query/commits/with_group_test.go @@ -89,10 +89,10 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(2), "_group": []map[string]any{ { - "cid": "bafybeibvzg7f2p772ev3srlzt4w5jjwlo3nw4chtd6ewuvbrnlidzqtmr4", + "cid": "bafybeift3qzwhklfpkgszvrmbfzb6zp3g3cqryhjkuaoz3kp2yrj763jce", }, { - "cid": "bafybeiahsvsfxvytbmyek7mjzh666y2qz2jlfse4fdgwzx4lnunuukurcm", + "cid": "bafybeige35bkafoez4cf4v6hgdkm5iaqcuqfq4bkyt7fxeycbdnqtbr7g4", }, }, }, @@ -100,13 +100,13 @@ func TestQueryCommitsWithGroupByHeightWithChild(t *testing.T) { "height": int64(1), "_group": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", }, }, }, @@ -142,7 +142,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }`, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "_group": []map[string]any{ { "height": int64(1), @@ -150,7 +150,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "_group": []map[string]any{ { "height": 
int64(1), @@ -158,7 +158,7 @@ func TestQueryCommitsWithGroupByCidWithChild(t *testing.T) { }, }, { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "_group": []map[string]any{ { "height": int64(1), diff --git a/tests/integration/query/latest_commits/with_dockey_field_test.go b/tests/integration/query/latest_commits/with_dockey_field_test.go index f85689c982..dce5da651f 100644 --- a/tests/integration/query/latest_commits/with_dockey_field_test.go +++ b/tests/integration/query/latest_commits/with_dockey_field_test.go @@ -68,7 +68,7 @@ func TestQueryLatestCommitsWithDocKeyAndFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "links": []map[string]any{}, }, }, @@ -101,14 +101,14 @@ func TestQueryLatestCommitsWithDocKeyAndCompositeFieldId(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "name": "age", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "name": "name", }, }, diff --git a/tests/integration/query/latest_commits/with_dockey_test.go b/tests/integration/query/latest_commits/with_dockey_test.go index 6fb8771f48..e07d34836f 100644 --- a/tests/integration/query/latest_commits/with_dockey_test.go +++ b/tests/integration/query/latest_commits/with_dockey_test.go @@ -38,14 +38,14 @@ func TestQueryLatestCommitsWithDocKey(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", "links": []map[string]any{ { - "cid": "bafybeic5oodfpnixl6uf4bi63m3eouuhj3gafudlsd4tqryhx2wy7rczoe", + "cid": "bafybeiazsz3twea2uxpen6452qqa7qnzp2xildfxliidhqk632jpvbixkm", "name": "age", }, { - "cid": "bafybeifukwb3t73k7pph3ctp5khosoycp53ywjl6btravzk6decggkjtl4", + "cid": "bafybeidzukbs36cwwhab4rkpi6jfhhxse2vjtc5tf767qda5valcinilmy", "name": "name", }, }, @@ -75,8 +75,8 @@ func TestQueryLatestCommitsWithDocKeyWithSchemaVersionIdField(t *testing.T) { }, Results: []map[string]any{ { - "cid": "bafybeig3wrpwi6q7vjchizcwnenslasyxop6wey7jahbiszlubdglfq2fq", - "schemaVersionId": "bafkreicihc56up4gzd4pf6lsmg5fc7dugyuigoaywgtjwy5c2suvj5zhtm", + "cid": "bafybeihbcl2ijavd6vdcj4vgunw4q5qt5itmumxw7iy7fhoqfsuvkpkqeq", + "schemaVersionId": "bafkreictcre4pylafzzoh5lpgbetdodunz4r6pz3ormdzzpsz2lqtp4v34", }, }, } diff --git a/tests/integration/query/one_to_many/simple_test.go b/tests/integration/query/one_to_many/simple_test.go index 9e4ad72fd5..63f27c3e17 100644 --- a/tests/integration/query/one_to_many/simple_test.go +++ b/tests/integration/query/one_to_many/simple_test.go @@ -54,7 +54,7 @@ func TestQueryOneToMany(t *testing.T) { "rating": 4.9, "author": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, }, }, @@ -109,7 +109,7 @@ func TestQueryOneToMany(t *testing.T) { Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "published": []map[string]any{ { "name": "Painted House", @@ 
-123,7 +123,7 @@ func TestQueryOneToMany(t *testing.T) { }, { "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), "published": []map[string]any{ { "name": "Theif Lord", diff --git a/tests/integration/query/one_to_many/with_cid_dockey_test.go b/tests/integration/query/one_to_many/with_cid_dockey_test.go index aa8dd1906e..be7589c707 100644 --- a/tests/integration/query/one_to_many/with_cid_dockey_test.go +++ b/tests/integration/query/one_to_many/with_cid_dockey_test.go @@ -68,7 +68,7 @@ func TestQueryOneToManyWithCidAndDocKey(t *testing.T) { Description: "One-to-many relation query from one side with cid and dockey", Request: `query { Book ( - cid: "bafybeie3srbs3vyirntnaubjwn7i3cltht3mfbvpiiahxvkw5yvmte2fne" + cid: "bafybeigq7vjp6btvgms2k6ajgtcvygv4bvejk5pin44jbib43rwqa2j64q" dockey: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name @@ -117,7 +117,7 @@ func TestQueryOneToManyWithChildUpdateAndFirstCidAndDocKey(t *testing.T) { Description: "One-to-many relation query from one side with child update and parent cid and dockey", Request: `query { Book ( - cid: "bafybeie3srbs3vyirntnaubjwn7i3cltht3mfbvpiiahxvkw5yvmte2fne", + cid: "bafybeigq7vjp6btvgms2k6ajgtcvygv4bvejk5pin44jbib43rwqa2j64q", dockey: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name @@ -159,7 +159,7 @@ func TestQueryOneToManyWithChildUpdateAndFirstCidAndDocKey(t *testing.T) { "name": "Painted House", "author": map[string]any{ "name": "John Grisham", - "age": uint64(22), + "age": int64(22), }, }, }, @@ -173,7 +173,7 @@ func TestQueryOneToManyWithParentUpdateAndFirstCidAndDocKey(t *testing.T) { Description: "One-to-many relation query from one side with parent update and parent cid and dockey", Request: `query { Book ( - cid: "bafybeie3srbs3vyirntnaubjwn7i3cltht3mfbvpiiahxvkw5yvmte2fne", + cid: "bafybeigq7vjp6btvgms2k6ajgtcvygv4bvejk5pin44jbib43rwqa2j64q", dockey: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name @@ -229,7 +229,7 @@ func TestQueryOneToManyWithParentUpdateAndLastCidAndDocKey(t *testing.T) { Description: "One-to-many relation query from one side with parent update and parent cid and dockey", Request: `query { Book ( - cid: "bafybeiavnr6gu2ccfm2ygc2m2nsqbhnoorhjf2p6f2rq4rkggjz55je5ym", + cid: "bafybeigukwqfzjxvuaok53gradxpvz7ag6l73b77lpjdcfglizmnv6zurq", dockey: "bae-b9b83269-1f28-5c3b-ae75-3fb4c00d559d" ) { name diff --git a/tests/integration/query/one_to_many/with_filter_test.go b/tests/integration/query/one_to_many/with_filter_test.go index 322f1581bc..405e345801 100644 --- a/tests/integration/query/one_to_many/with_filter_test.go +++ b/tests/integration/query/one_to_many/with_filter_test.go @@ -80,7 +80,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParent(t *testing.T) { Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "published": []map[string]any{ { "name": "Painted House", @@ -232,7 +232,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndChild(t *testing.T Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "published": []map[string]any{ { "name": "Painted House", @@ -316,7 +316,7 @@ func TestQueryOneToManyWithMultipleAliasedFilteredChildren(t *testing.T) { Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "p1": []map[string]any{ { "name": "Painted House", @@ -332,7 +332,7 @@ func TestQueryOneToManyWithMultipleAliasedFilteredChildren(t *testing.T) { }, { "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), "p1": 
[]map[string]any{ { "name": "Theif Lord", diff --git a/tests/integration/query/one_to_many/with_group_filter_test.go b/tests/integration/query/one_to_many/with_group_filter_test.go index d018d858ce..05e5b1c573 100644 --- a/tests/integration/query/one_to_many/with_group_filter_test.go +++ b/tests/integration/query/one_to_many/with_group_filter_test.go @@ -90,7 +90,7 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnJoin(t *testing }, Results: []map[string]any{ { - "age": uint64(327), + "age": int64(327), "_group": []map[string]any{ { "name": "Simon Pelloutier", @@ -112,7 +112,7 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnJoin(t *testing }, }, { - "age": uint64(65), + "age": int64(65), "_group": []map[string]any{ { "name": "John Grisham", @@ -208,7 +208,7 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnGroup(t *testin }, Results: []map[string]any{ { - "age": uint64(327), + "age": int64(327), "_group": []map[string]any{ { "name": "Voltaire", @@ -226,7 +226,7 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnGroup(t *testin }, }, { - "age": uint64(65), + "age": int64(65), "_group": []map[string]any{ { "name": "John Grisham", @@ -330,7 +330,7 @@ func TestQueryOneToManyWithParentJoinGroupNumberAndNumberFilterOnGroupAndOnGroup }, Results: []map[string]any{ { - "age": uint64(327), + "age": int64(327), "_group": []map[string]any{ { "name": "Simon Pelloutier", diff --git a/tests/integration/query/one_to_many/with_group_related_id_alias_test.go b/tests/integration/query/one_to_many/with_group_related_id_alias_test.go index 8e2223e324..7c813d9359 100644 --- a/tests/integration/query/one_to_many/with_group_related_id_alias_test.go +++ b/tests/integration/query/one_to_many/with_group_related_id_alias_test.go @@ -99,7 +99,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAlias(t *t "name": "Candide", "rating": 4.95, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -107,7 +107,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAlias(t *t "name": "Zadig", "rating": 4.91, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -120,7 +120,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAlias(t *t "name": "The Client", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -128,7 +128,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAlias(t *t "name": "Painted House", "rating": 4.9, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -136,7 +136,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAlias(t *t "name": "A Time for Mercy", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -149,7 +149,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAlias(t *t "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2.0, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Simon Pelloutier", }, }, @@ -251,7 +251,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAliasAndRe "name": "Candide", "rating": 4.95, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": 
"Voltaire", }, }, @@ -259,7 +259,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAliasAndRe "name": "Zadig", "rating": 4.91, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -275,7 +275,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAliasAndRe "name": "The Client", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -283,7 +283,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAliasAndRe "name": "Painted House", "rating": 4.9, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -291,7 +291,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAliasAndRe "name": "A Time for Mercy", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -307,7 +307,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeFromManySideUsingAliasAndRe "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2.0, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Simon Pelloutier", }, }, @@ -402,7 +402,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "Candide", "rating": 4.95, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -410,7 +410,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "Zadig", "rating": 4.91, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -423,7 +423,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "The Client", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -431,7 +431,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "Painted House", "rating": 4.9, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -439,7 +439,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "A Time for Mercy", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -452,7 +452,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2.0, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Simon Pelloutier", }, }, @@ -555,7 +555,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "Candide", "rating": 4.95, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -563,7 +563,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "Zadig", "rating": 4.91, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -580,7 +580,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "The Client", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", 
}, }, @@ -588,7 +588,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "Painted House", "rating": 4.9, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -596,7 +596,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "A Time for Mercy", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -613,7 +613,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeWithIDSelectionFromManySide "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2.0, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Simon Pelloutier", }, }, diff --git a/tests/integration/query/one_to_many/with_group_related_id_test.go b/tests/integration/query/one_to_many/with_group_related_id_test.go index 535e8665cd..6b6b6f331f 100644 --- a/tests/integration/query/one_to_many/with_group_related_id_test.go +++ b/tests/integration/query/one_to_many/with_group_related_id_test.go @@ -96,7 +96,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T "name": "Candide", "rating": 4.95, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -104,7 +104,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T "name": "Zadig", "rating": 4.91, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -117,7 +117,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T "name": "The Client", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -125,7 +125,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T "name": "Painted House", "rating": 4.9, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -133,7 +133,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T "name": "A Time for Mercy", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -146,7 +146,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDFromManySide(t *testing.T "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2.0, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Simon Pelloutier", }, }, @@ -238,7 +238,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi "name": "Candide", "rating": 4.95, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -246,7 +246,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi "name": "Zadig", "rating": 4.91, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Voltaire", }, }, @@ -259,7 +259,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi "name": "The Client", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -267,7 +267,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi "name": "Painted House", "rating": 4.9, 
"author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -275,7 +275,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi "name": "A Time for Mercy", "rating": 4.5, "author": map[string]any{ - "age": uint64(65), + "age": int64(65), "name": "John Grisham", }, }, @@ -288,7 +288,7 @@ func TestQueryOneToManyWithParentGroupByOnRelatedTypeIDWithIDSelectionFromManySi "name": "Histoiare des Celtes et particulierement des Gaulois et des Germains depuis les temps fabuleux jusqua la prise de Roze par les Gaulois", "rating": 2.0, "author": map[string]any{ - "age": uint64(327), + "age": int64(327), "name": "Simon Pelloutier", }, }, diff --git a/tests/integration/query/one_to_many/with_group_test.go b/tests/integration/query/one_to_many/with_group_test.go index 9e4c1f174e..b56a6f5cea 100644 --- a/tests/integration/query/one_to_many/with_group_test.go +++ b/tests/integration/query/one_to_many/with_group_test.go @@ -75,7 +75,7 @@ func TestQueryOneToManyWithInnerJoinGroupNumber(t *testing.T) { Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "published": []map[string]any{ { "rating": 4.5, @@ -100,7 +100,7 @@ func TestQueryOneToManyWithInnerJoinGroupNumber(t *testing.T) { }, { "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), "published": []map[string]any{ { "rating": 4.8, @@ -195,7 +195,7 @@ func TestQueryOneToManyWithParentJoinGroupNumber(t *testing.T) { }, Results: []map[string]any{ { - "age": uint64(327), + "age": int64(327), "_group": []map[string]any{ { "name": "Simon Pelloutier", @@ -222,7 +222,7 @@ func TestQueryOneToManyWithParentJoinGroupNumber(t *testing.T) { }, }, { - "age": uint64(65), + "age": int64(65), "_group": []map[string]any{ { "name": "John Grisham", diff --git a/tests/integration/query/one_to_many/with_id_field_test.go b/tests/integration/query/one_to_many/with_id_field_test.go index 455ae3ab78..c51e5f8d4c 100644 --- a/tests/integration/query/one_to_many/with_id_field_test.go +++ b/tests/integration/query/one_to_many/with_id_field_test.go @@ -76,7 +76,7 @@ func TestQueryOneToManyWithIdFieldOnPrimary(t *testing.T) { }, { "name": "Painted House", - "author_id": uint64(123456), + "author_id": int64(123456), "author": nil, }, }, diff --git a/tests/integration/query/one_to_many/with_order_filter_limit_test.go b/tests/integration/query/one_to_many/with_order_filter_limit_test.go index e1eab73d44..8acee4db18 100644 --- a/tests/integration/query/one_to_many/with_order_filter_limit_test.go +++ b/tests/integration/query/one_to_many/with_order_filter_limit_test.go @@ -69,7 +69,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndNumericSortAscendi Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "published": []map[string]any{ { "name": "A Time for Mercy", @@ -136,7 +136,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndNumericSortDescend Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "published": []map[string]any{ { "name": "Painted House", diff --git a/tests/integration/query/one_to_many/with_order_filter_test.go b/tests/integration/query/one_to_many/with_order_filter_test.go index 1207d2772d..a7de6d5977 100644 --- a/tests/integration/query/one_to_many/with_order_filter_test.go +++ b/tests/integration/query/one_to_many/with_order_filter_test.go @@ -69,7 +69,7 @@ func 
TestQueryOneToManyWithNumericGreaterThanFilterOnParentAndNumericSortAscendi Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "published": []map[string]any{ { "name": "A Time for Mercy", @@ -138,7 +138,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterAndNumericSortDescendingOnChi Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "published": []map[string]any{ { "name": "Painted House", @@ -152,7 +152,7 @@ func TestQueryOneToManyWithNumericGreaterThanFilterAndNumericSortDescendingOnChi }, { "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), "published": []map[string]any{ { "name": "Theif Lord", diff --git a/tests/integration/query/one_to_many_to_one/with_filter_test.go b/tests/integration/query/one_to_many_to_one/with_filter_test.go index 99890196bb..e02ae9e12c 100644 --- a/tests/integration/query/one_to_many_to_one/with_filter_test.go +++ b/tests/integration/query/one_to_many_to_one/with_filter_test.go @@ -113,7 +113,7 @@ func TestQueryComplexWithDeepFilterOnRenderedChildren(t *testing.T) { "book": []map[string]any{ { "publisher": map[string]any{ - "yearOpened": uint64(2022), + "yearOpened": int64(2022), }, }, }, @@ -242,25 +242,25 @@ func TestOneToManyToOneWithTwoLevelDeepFilter(t *testing.T) { { "name": "Sooley", "publisher": map[string]any{ - "yearOpened": uint64(1999), + "yearOpened": int64(1999), }, }, { "name": "Theif Lord", "publisher": map[string]any{ - "yearOpened": uint64(2020), + "yearOpened": int64(2020), }, }, { "name": "Painted House", "publisher": map[string]any{ - "yearOpened": uint64(1995), + "yearOpened": int64(1995), }, }, { "name": "A Time for Mercy", "publisher": map[string]any{ - "yearOpened": uint64(2013), + "yearOpened": int64(2013), }, }, }, @@ -271,7 +271,7 @@ func TestOneToManyToOneWithTwoLevelDeepFilter(t *testing.T) { { "name": "The Rooster Bar", "publisher": map[string]any{ - "yearOpened": uint64(2022), + "yearOpened": int64(2022), }, }, }, diff --git a/tests/integration/query/one_to_many_to_one/with_order_test.go b/tests/integration/query/one_to_many_to_one/with_order_test.go index 41bb88f544..91fccbe8d6 100644 --- a/tests/integration/query/one_to_many_to_one/with_order_test.go +++ b/tests/integration/query/one_to_many_to_one/with_order_test.go @@ -39,7 +39,7 @@ func TestMultipleOrderByWithDepthGreaterThanOne(t *testing.T) { "rating": 3.2, "publisher": map[string]any{ "name": "Only Publisher of Sooley", - "yearOpened": uint64(1999), + "yearOpened": int64(1999), }, }, { @@ -47,7 +47,7 @@ func TestMultipleOrderByWithDepthGreaterThanOne(t *testing.T) { "rating": 4.0, "publisher": map[string]any{ "name": "Only Publisher of The Rooster Bar", - "yearOpened": uint64(2022), + "yearOpened": int64(2022), }, }, { @@ -60,7 +60,7 @@ func TestMultipleOrderByWithDepthGreaterThanOne(t *testing.T) { "rating": 4.5, "publisher": map[string]any{ "name": "Only Publisher of A Time for Mercy", - "yearOpened": uint64(2013), + "yearOpened": int64(2013), }, }, { @@ -68,7 +68,7 @@ func TestMultipleOrderByWithDepthGreaterThanOne(t *testing.T) { "rating": 4.8, "publisher": map[string]any{ "name": "Only Publisher of Theif Lord", - "yearOpened": uint64(2020), + "yearOpened": int64(2020), }, }, { @@ -76,7 +76,7 @@ func TestMultipleOrderByWithDepthGreaterThanOne(t *testing.T) { "rating": 4.9, "publisher": map[string]any{ "name": "Only Publisher of Painted House", - "yearOpened": uint64(1995), + "yearOpened": int64(1995), }, }, }, @@ -110,7 +110,7 @@ func 
TestMultipleOrderByWithDepthGreaterThanOneOrderSwitched(t *testing.T) { "rating": 4.0, "publisher": map[string]any{ "name": "Only Publisher of The Rooster Bar", - "yearOpened": uint64(2022), + "yearOpened": int64(2022), }, }, { @@ -118,7 +118,7 @@ func TestMultipleOrderByWithDepthGreaterThanOneOrderSwitched(t *testing.T) { "rating": 4.8, "publisher": map[string]any{ "name": "Only Publisher of Theif Lord", - "yearOpened": uint64(2020), + "yearOpened": int64(2020), }, }, { @@ -126,7 +126,7 @@ func TestMultipleOrderByWithDepthGreaterThanOneOrderSwitched(t *testing.T) { "rating": 4.5, "publisher": map[string]any{ "name": "Only Publisher of A Time for Mercy", - "yearOpened": uint64(2013), + "yearOpened": int64(2013), }, }, { @@ -134,7 +134,7 @@ func TestMultipleOrderByWithDepthGreaterThanOneOrderSwitched(t *testing.T) { "rating": 3.2, "publisher": map[string]any{ "name": "Only Publisher of Sooley", - "yearOpened": uint64(1999), + "yearOpened": int64(1999), }, }, { @@ -142,7 +142,7 @@ func TestMultipleOrderByWithDepthGreaterThanOneOrderSwitched(t *testing.T) { "rating": 4.9, "publisher": map[string]any{ "name": "Only Publisher of Painted House", - "yearOpened": uint64(1995), + "yearOpened": int64(1995), }, }, { diff --git a/tests/integration/query/one_to_one/simple_test.go b/tests/integration/query/one_to_one/simple_test.go index 1fcefa0606..6f7f95b21e 100644 --- a/tests/integration/query/one_to_one/simple_test.go +++ b/tests/integration/query/one_to_one/simple_test.go @@ -54,7 +54,7 @@ func TestQueryOneToOne(t *testing.T) { "rating": 4.9, "author": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, }, }, @@ -92,7 +92,7 @@ func TestQueryOneToOne(t *testing.T) { Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "published": map[string]any{ "name": "Painted House", "rating": 4.9, diff --git a/tests/integration/query/one_to_one/with_filter_test.go b/tests/integration/query/one_to_one/with_filter_test.go index a4b6abf6de..25b42d4268 100644 --- a/tests/integration/query/one_to_one/with_filter_test.go +++ b/tests/integration/query/one_to_one/with_filter_test.go @@ -58,7 +58,7 @@ func TestQueryOneToOneWithNumericFilterOnParent(t *testing.T) { "rating": 4.9, "author": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, }, }, @@ -111,7 +111,7 @@ func TestQueryOneToOneWithStringFilterOnChild(t *testing.T) { "rating": 4.9, "author": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, }, }, @@ -164,7 +164,7 @@ func TestQueryOneToOneWithBooleanFilterOnChild(t *testing.T) { "rating": 4.9, "author": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, }, }, @@ -235,7 +235,7 @@ func TestQueryOneToOneWithFilterThroughChildBackToParent(t *testing.T) { "rating": 4.9, "author": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, }, }, diff --git a/tests/integration/query/one_to_one/with_order_test.go b/tests/integration/query/one_to_one/with_order_test.go index 0b939f319c..eca937539a 100644 --- a/tests/integration/query/one_to_one/with_order_test.go +++ b/tests/integration/query/one_to_one/with_order_test.go @@ -67,7 +67,7 @@ func TestQueryOneToOneWithChildBooleanOrderDescending(t *testing.T) { "rating": 4.9, "author": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, }, { @@ -75,7 +75,7 @@ func TestQueryOneToOneWithChildBooleanOrderDescending(t *testing.T) { "rating": 4.8, 
"author": map[string]any{ "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), }, }, }, @@ -135,7 +135,7 @@ func TestQueryOneToOneWithChildBooleanOrderAscending(t *testing.T) { "rating": 4.8, "author": map[string]any{ "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), }, }, { @@ -143,7 +143,7 @@ func TestQueryOneToOneWithChildBooleanOrderAscending(t *testing.T) { "rating": 4.9, "author": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, }, }, diff --git a/tests/integration/query/one_to_two_many/simple_test.go b/tests/integration/query/one_to_two_many/simple_test.go index aef7ee8c1d..6a8fe674e2 100644 --- a/tests/integration/query/one_to_two_many/simple_test.go +++ b/tests/integration/query/one_to_two_many/simple_test.go @@ -80,7 +80,7 @@ func TestQueryOneToTwoManyWithNilUnnamedRelationship(t *testing.T) { }, "reviewedBy": map[string]any{ "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), }, }, { @@ -91,7 +91,7 @@ func TestQueryOneToTwoManyWithNilUnnamedRelationship(t *testing.T) { }, "reviewedBy": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, }, { @@ -102,7 +102,7 @@ func TestQueryOneToTwoManyWithNilUnnamedRelationship(t *testing.T) { }, "reviewedBy": map[string]any{ "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), }, }, }, @@ -163,7 +163,7 @@ func TestQueryOneToTwoManyWithNilUnnamedRelationship(t *testing.T) { Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "reviewed": []map[string]any{ { "name": "Theif Lord", @@ -181,7 +181,7 @@ func TestQueryOneToTwoManyWithNilUnnamedRelationship(t *testing.T) { }, { "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), "reviewed": []map[string]any{ { "name": "Painted House", @@ -290,7 +290,7 @@ func TestQueryOneToTwoManyWithNamedAndUnnamedRelationships(t *testing.T) { }, "reviewedBy": map[string]any{ "name": "John Grisham", - "age": uint64(65), + "age": int64(65), }, "price": map[string]any{ "currency": "GBP", @@ -305,7 +305,7 @@ func TestQueryOneToTwoManyWithNamedAndUnnamedRelationships(t *testing.T) { }, "reviewedBy": map[string]any{ "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), }, "price": map[string]any{ "currency": "SEK", @@ -320,7 +320,7 @@ func TestQueryOneToTwoManyWithNamedAndUnnamedRelationships(t *testing.T) { }, "reviewedBy": map[string]any{ "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), }, "price": map[string]any{ "currency": "GBP", @@ -403,7 +403,7 @@ func TestQueryOneToTwoManyWithNamedAndUnnamedRelationships(t *testing.T) { Results: []map[string]any{ { "name": "John Grisham", - "age": uint64(65), + "age": int64(65), "reviewed": []map[string]any{ { "name": "Theif Lord", @@ -427,7 +427,7 @@ func TestQueryOneToTwoManyWithNamedAndUnnamedRelationships(t *testing.T) { }, { "name": "Cornelia Funke", - "age": uint64(62), + "age": int64(62), "reviewed": []map[string]any{ { "name": "A Time for Mercy", diff --git a/tests/integration/query/simple/simple_test.go b/tests/integration/query/simple/simple_test.go index bb118cad18..6911b08ea8 100644 --- a/tests/integration/query/simple/simple_test.go +++ b/tests/integration/query/simple/simple_test.go @@ -38,7 +38,7 @@ func TestQuerySimple(t *testing.T) { { "_key": "bae-52b9170d-b77a-5887-b877-cbdbb99b009f", "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, } @@ -66,7 +66,7 @@ func TestQuerySimpleWithAlias(t *testing.T) { Results: []map[string]any{ { "username": 
"John", - "age": uint64(21), + "age": int64(21), }, }, } @@ -98,11 +98,11 @@ func TestQuerySimpleWithMultipleRows(t *testing.T) { Results: []map[string]any{ { "Name": "Bob", - "Age": uint64(27), + "Age": int64(27), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, } diff --git a/tests/integration/query/simple/with_cid_dockey_test.go b/tests/integration/query/simple/with_cid_dockey_test.go index 343da0bd9e..7bd1eb4971 100644 --- a/tests/integration/query/simple/with_cid_dockey_test.go +++ b/tests/integration/query/simple/with_cid_dockey_test.go @@ -73,7 +73,7 @@ func TestQuerySimpleWithCidAndDocKey(t *testing.T) { Description: "Simple query with cid and dockey", Request: `query { Users ( - cid: "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + cid: "bafybeieybepwqpy5h2d4sywksgvdqpjd44ciu223vrm7knumychpmucawy", dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -102,7 +102,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocKey(t *testing.T) { Description: "Simple query with (first) cid and dockey", Request: `query { Users ( - cid: "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + cid: "bafybeieybepwqpy5h2d4sywksgvdqpjd44ciu223vrm7knumychpmucawy", dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -130,7 +130,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocKey(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, } @@ -143,7 +143,7 @@ func TestQuerySimpleWithUpdateAndLastCidAndDocKey(t *testing.T) { Description: "Simple query with (last) cid and dockey", Request: `query { Users ( - cid: "bafybeih4is4so2tw47gfjsty6nk7fvcggd3uyq5tgiw4yjnobtbdwnqdoi" + cid: "bafybeiav54zfepx5n2zcm2g34q5ur5w2dosb2ssxjckq3esy5dg6nftxse" dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -171,7 +171,7 @@ func TestQuerySimpleWithUpdateAndLastCidAndDocKey(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(23), + "Age": int64(23), }, }, } @@ -184,7 +184,7 @@ func TestQuerySimpleWithUpdateAndMiddleCidAndDocKey(t *testing.T) { Description: "Simple query with (middle) cid and dockey", Request: `query { Users ( - cid: "bafybeigvkbbe6e5ztwz7qtwu3xg2zj4stpiwkhyj6qbu6ir3qdmrd3bhem", + cid: "bafybeicrati3sbl3esju7eus3dwi53aggd6thhtporh7vj5mv77vvs3mdy", dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -212,7 +212,7 @@ func TestQuerySimpleWithUpdateAndMiddleCidAndDocKey(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(22), + "Age": int64(22), }, }, } @@ -225,7 +225,7 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocKeyAndSchemaVersion(t *testing.T) Description: "Simple query with (first) cid and dockey and yielded schema version", Request: `query { Users ( - cid: "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + cid: "bafybeieybepwqpy5h2d4sywksgvdqpjd44ciu223vrm7knumychpmucawy", dockey: "bae-52b9170d-b77a-5887-b877-cbdbb99b009f" ) { Name @@ -256,10 +256,10 @@ func TestQuerySimpleWithUpdateAndFirstCidAndDocKeyAndSchemaVersion(t *testing.T) Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), "_version": []map[string]any{ { - "schemaVersionId": "bafkreicl3pjcorfcaexxmqcrilkhx7xl37o6b34nxgtiauygtl7hrqbhoq", + "schemaVersionId": "bafkreicqyapc7zxw5tt2ymybau5m54lhmm5ahrl22oaktnhidul757a4ba", }, }, }, diff --git a/tests/integration/query/simple/with_dockey_test.go b/tests/integration/query/simple/with_dockey_test.go index ac0aa962f1..5af4dac7ab 100644 --- 
a/tests/integration/query/simple/with_dockey_test.go +++ b/tests/integration/query/simple/with_dockey_test.go @@ -37,7 +37,7 @@ func TestQuerySimpleWithDocKeyFilter(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -82,7 +82,7 @@ func TestQuerySimpleWithDocKeyFilter(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/query/simple/with_dockeys_test.go b/tests/integration/query/simple/with_dockeys_test.go index 3945b22ac6..8bbd0067da 100644 --- a/tests/integration/query/simple/with_dockeys_test.go +++ b/tests/integration/query/simple/with_dockeys_test.go @@ -37,7 +37,7 @@ func TestQuerySimpleWithDocKeysFilter(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -82,7 +82,7 @@ func TestQuerySimpleWithDocKeysFilter(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -113,11 +113,11 @@ func TestQuerySimpleWithDocKeysFilter(t *testing.T) { Results: []map[string]any{ { "Name": "Jim", - "Age": uint64(27), + "Age": int64(27), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/query/simple/with_filter/with_and_test.go b/tests/integration/query/simple/with_filter/with_and_test.go index f3a59a1409..6ec50be686 100644 --- a/tests/integration/query/simple/with_filter/with_and_test.go +++ b/tests/integration/query/simple/with_filter/with_and_test.go @@ -48,11 +48,11 @@ func TestQuerySimpleWithIntGreaterThanAndIntLessThanFilter(t *testing.T) { Results: []map[string]any{ { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, } diff --git a/tests/integration/query/simple/with_filter/with_eq_datetime_test.go b/tests/integration/query/simple/with_filter/with_eq_datetime_test.go index 11092e9d0d..10214fad92 100644 --- a/tests/integration/query/simple/with_filter/with_eq_datetime_test.go +++ b/tests/integration/query/simple/with_filter/with_eq_datetime_test.go @@ -43,7 +43,7 @@ func TestQuerySimpleWithDateTimeEqualsFilterBlock(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), "CreatedAt": "2017-07-23T03:46:56.647Z", }, }, @@ -83,7 +83,7 @@ func TestQuerySimpleWithDateTimeEqualsNilFilterBlock(t *testing.T) { Results: []map[string]any{ { "Name": "Fred", - "Age": uint64(44), + "Age": int64(44), "CreatedAt": nil, }, }, diff --git a/tests/integration/query/simple/with_filter/with_eq_int_test.go b/tests/integration/query/simple/with_filter/with_eq_int_test.go index e58929975f..067c2d2198 100644 --- a/tests/integration/query/simple/with_filter/with_eq_int_test.go +++ b/tests/integration/query/simple/with_filter/with_eq_int_test.go @@ -40,7 +40,7 @@ func TestQuerySimpleWithIntEqualsFilterBlock(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, } diff --git a/tests/integration/query/simple/with_filter/with_eq_string_test.go b/tests/integration/query/simple/with_filter/with_eq_string_test.go index 02253153fb..18b03bee58 100644 --- a/tests/integration/query/simple/with_filter/with_eq_string_test.go +++ b/tests/integration/query/simple/with_filter/with_eq_string_test.go @@ -40,7 +40,7 @@ func TestQuerySimpleWithStringFilterBlock(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": 
int64(21), }, }, } @@ -75,7 +75,7 @@ func TestQuerySimpleWithStringEqualsNilFilterBlock(t *testing.T) { Results: []map[string]any{ { "Name": nil, - "Age": uint64(60), + "Age": int64(60), }, }, } @@ -131,7 +131,7 @@ func TestQuerySimpleWithStringFilterBlockAndSelect(t *testing.T) { }, Results: []map[string]any{ { - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/query/simple/with_filter/with_gt_int_test.go b/tests/integration/query/simple/with_filter/with_gt_int_test.go index 40c14eb5e3..ecafd44ee4 100644 --- a/tests/integration/query/simple/with_filter/with_gt_int_test.go +++ b/tests/integration/query/simple/with_filter/with_gt_int_test.go @@ -41,7 +41,7 @@ func TestQuerySimpleWithIntGreaterThanFilterBlock(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -90,11 +90,11 @@ func TestQuerySimpleWithIntGreaterThanFilterBlock(t *testing.T) { Results: []map[string]any{ { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, diff --git a/tests/integration/query/simple/with_filter/with_in_test.go b/tests/integration/query/simple/with_filter/with_in_test.go index 4725925f97..a43f19c37b 100644 --- a/tests/integration/query/simple/with_filter/with_in_test.go +++ b/tests/integration/query/simple/with_filter/with_in_test.go @@ -48,11 +48,11 @@ func TestQuerySimpleWithIntInFilter(t *testing.T) { Results: []map[string]any{ { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, }, } @@ -99,11 +99,11 @@ func TestQuerySimpleWithIntInFilterWithNullValue(t *testing.T) { }, { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, }, } diff --git a/tests/integration/query/simple/with_filter/with_ne_string_test.go b/tests/integration/query/simple/with_filter/with_ne_string_test.go index 5b3aed8de8..4142eac647 100644 --- a/tests/integration/query/simple/with_filter/with_ne_string_test.go +++ b/tests/integration/query/simple/with_filter/with_ne_string_test.go @@ -38,7 +38,7 @@ func TestQuerySimpleWithStringNotEqualsFilterBlock(t *testing.T) { }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, }, } @@ -71,10 +71,10 @@ func TestQuerySimpleWithStringNotEqualsNilFilterBlock(t *testing.T) { }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, { - "Age": uint64(21), + "Age": int64(21), }, }, } diff --git a/tests/integration/query/simple/with_filter/with_not_test.go b/tests/integration/query/simple/with_filter/with_not_test.go index 3b5832bcdb..2ce454a358 100644 --- a/tests/integration/query/simple/with_filter/with_not_test.go +++ b/tests/integration/query/simple/with_filter/with_not_test.go @@ -48,15 +48,15 @@ func TestQuerySimple_WithNotEqualToXFilter_NoError(t *testing.T) { Results: []map[string]any{ { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, } @@ -96,7 +96,7 @@ func TestQuerySimple_WithNotAndComparisonXFilter_NoError(t *testing.T) { Results: []map[string]any{ { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, }, } @@ -136,11 +136,11 @@ func TestQuerySimple_WithNotEqualToXorYFilter_NoError(t *testing.T) { Results: []map[string]any{ { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "John", - 
"Age": uint64(21), + "Age": int64(21), }, }, } @@ -219,19 +219,19 @@ func TestQuerySimple_WithNotEqualToXAndNotYFilter_NoError(t *testing.T) { Results: []map[string]any{ { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, }, } diff --git a/tests/integration/query/simple/with_filter/with_or_test.go b/tests/integration/query/simple/with_filter/with_or_test.go index 513283b5bf..1ff1f91ce6 100644 --- a/tests/integration/query/simple/with_filter/with_or_test.go +++ b/tests/integration/query/simple/with_filter/with_or_test.go @@ -48,11 +48,11 @@ func TestQuerySimpleWithIntEqualToXOrYFilter(t *testing.T) { Results: []map[string]any{ { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, }, } diff --git a/tests/integration/query/simple/with_group_average_filter_test.go b/tests/integration/query/simple/with_group_average_filter_test.go index 98484d3c0b..23d79b0bf8 100644 --- a/tests/integration/query/simple/with_group_average_filter_test.go +++ b/tests/integration/query/simple/with_group_average_filter_test.go @@ -90,7 +90,7 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupAndChildAverageWithFilter( "_avg": float64(0), "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -99,10 +99,10 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupAndChildAverageWithFilter( "_avg": float64(33), "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, { - "Age": uint64(34), + "Age": int64(34), }, }, }, @@ -149,10 +149,10 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupAndChildAverageWithDateTim "_avg": float64(33), "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, { - "Age": uint64(34), + "Age": int64(34), }, }, }, @@ -161,7 +161,7 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupAndChildAverageWithDateTim "_avg": float64(0), "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -210,7 +210,7 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupWithFilterAndChildAverageW "_avg": float64(34), "_group": []map[string]any{ { - "Age": uint64(34), + "Age": int64(34), }, }, }, @@ -257,7 +257,7 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupWithFilterAndChildAverageW "_avg": float64(34), "_group": []map[string]any{ { - "Age": uint64(34), + "Age": int64(34), }, }, }, @@ -306,7 +306,7 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupWithFilterAndChildAverageW "_avg": float64(0), "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -315,7 +315,7 @@ func TestQuerySimpleWithGroupByStringWithRenderedGroupWithFilterAndChildAverageW "_avg": float64(34), "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, }, }, diff --git a/tests/integration/query/simple/with_group_average_test.go b/tests/integration/query/simple/with_group_average_test.go index 33209786f7..cebf392c26 100644 --- a/tests/integration/query/simple/with_group_average_test.go +++ b/tests/integration/query/simple/with_group_average_test.go @@ -441,7 +441,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndAverageOfAverageOfA "_avg": float64(2.22), "_group": []map[string]any{ { - "Age": uint64(34), + "Age": int64(34), "_avg": float64(2.22), }, }, @@ -451,11 +451,11 @@ func 
TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndAverageOfAverageOfA "_avg": float64(1.715), "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_avg": float64(1.61), }, { - "Age": uint64(25), + "Age": int64(25), "_avg": float64(1.82), }, }, @@ -471,7 +471,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndAverageOfAverageOfA "_avg": float64(2.04), "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), "_avg": float64(2.04), }, }, @@ -487,7 +487,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndAverageOfAverageOfA "_avg": float64(1.74), "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), "_avg": float64(1.74), }, }, diff --git a/tests/integration/query/simple/with_group_count_filter_test.go b/tests/integration/query/simple/with_group_count_filter_test.go index bfa14898c1..18a60bedf9 100644 --- a/tests/integration/query/simple/with_group_count_filter_test.go +++ b/tests/integration/query/simple/with_group_count_filter_test.go @@ -43,11 +43,11 @@ func TestQuerySimpleWithGroupByNumberWithoutRenderedGroupAndChildCountWithFilter }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 2, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 0, }, }, @@ -86,7 +86,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupAndChildCountWithFilter(t }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 2, "_group": []map[string]any{ { @@ -98,7 +98,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupAndChildCountWithFilter(t }, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 0, "_group": []map[string]any{ { @@ -142,7 +142,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithFilterAndChildCountWit }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 1, "_group": []map[string]any{ { @@ -151,7 +151,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithFilterAndChildCountWit }, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 0, "_group": []map[string]any{}, }, @@ -191,7 +191,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithFilterAndChildCountWit }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 2, "_group": []map[string]any{ { @@ -200,7 +200,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithFilterAndChildCountWit }, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 0, "_group": []map[string]any{}, }, @@ -238,12 +238,12 @@ func TestQuerySimpleWithGroupByNumberWithoutRenderedGroupAndChildCountsWithDiffe }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "C1": 2, "C2": 0, }, { - "Age": uint64(19), + "Age": int64(19), "C1": 0, "C2": 1, }, diff --git a/tests/integration/query/simple/with_group_count_limit_offset_test.go b/tests/integration/query/simple/with_group_count_limit_offset_test.go index c8ce7e842a..45fe4bf90a 100644 --- a/tests/integration/query/simple/with_group_count_limit_offset_test.go +++ b/tests/integration/query/simple/with_group_count_limit_offset_test.go @@ -43,11 +43,11 @@ func TestQuerySimpleWithGroupByNumberWithoutRenderedGroupAndChildCountWithLimitA }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 1, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 0, }, }, @@ -90,7 +90,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithLimitAndChildCountWith }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 1, 
"_group": []map[string]any{ { @@ -102,7 +102,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithLimitAndChildCountWith }, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 0, "_group": []map[string]any{ { diff --git a/tests/integration/query/simple/with_group_count_limit_test.go b/tests/integration/query/simple/with_group_count_limit_test.go index 15abc3d779..9476c99bc9 100644 --- a/tests/integration/query/simple/with_group_count_limit_test.go +++ b/tests/integration/query/simple/with_group_count_limit_test.go @@ -43,11 +43,11 @@ func TestQuerySimpleWithGroupByNumberWithoutRenderedGroupAndChildCountWithLimit( }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 1, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 1, }, }, @@ -90,7 +90,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithLimitAndChildCountWith }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 1, "_group": []map[string]any{ { @@ -102,7 +102,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithLimitAndChildCountWith }, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 1, "_group": []map[string]any{ { diff --git a/tests/integration/query/simple/with_group_count_test.go b/tests/integration/query/simple/with_group_count_test.go index e10de0aad6..6f3fda8320 100644 --- a/tests/integration/query/simple/with_group_count_test.go +++ b/tests/integration/query/simple/with_group_count_test.go @@ -92,11 +92,11 @@ func TestQuerySimpleWithGroupByNumberWithoutRenderedGroupAndChildCount(t *testin }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 2, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 1, }, }, @@ -150,7 +150,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupAndChildCount(t *testing.T }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_count": 2, "_group": []map[string]any{ { @@ -162,7 +162,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupAndChildCount(t *testing.T }, }, { - "Age": uint64(19), + "Age": int64(19), "_count": 1, "_group": []map[string]any{ { @@ -234,11 +234,11 @@ func TestQuerySimpleWithGroupByNumberWithoutRenderedGroupAndAliasesChildCount(t }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "Count": 2, }, { - "Age": uint64(19), + "Age": int64(19), "Count": 1, }, }, @@ -277,12 +277,12 @@ func TestQuerySimpleWithGroupByNumberWithoutRenderedGroupAndDuplicatedAliasedChi }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "Count1": 2, "Count2": 2, }, { - "Age": uint64(19), + "Age": int64(19), "Count1": 1, "Count2": 1, }, diff --git a/tests/integration/query/simple/with_group_dockey_test.go b/tests/integration/query/simple/with_group_dockey_test.go index e7a58e9222..c40a27efc2 100644 --- a/tests/integration/query/simple/with_group_dockey_test.go +++ b/tests/integration/query/simple/with_group_dockey_test.go @@ -46,11 +46,11 @@ func TestQuerySimpleWithGroupByWithGroupWithDocKey(t *testing.T) { }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_group": []map[string]any{}, }, { - "Age": uint64(21), + "Age": int64(21), "_group": []map[string]any{ { "Name": "John", diff --git a/tests/integration/query/simple/with_group_dockeys_test.go b/tests/integration/query/simple/with_group_dockeys_test.go index 7a6e5b5204..8d11607819 100644 --- a/tests/integration/query/simple/with_group_dockeys_test.go +++ b/tests/integration/query/simple/with_group_dockeys_test.go @@ -51,11 
+51,11 @@ func TestQuerySimpleWithGroupByWithGroupWithDocKeys(t *testing.T) { }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_group": []map[string]any{}, }, { - "Age": uint64(21), + "Age": int64(21), "_group": []map[string]any{ { "Name": "John", diff --git a/tests/integration/query/simple/with_group_filter_test.go b/tests/integration/query/simple/with_group_filter_test.go index 4594767bc3..36e09fa69f 100644 --- a/tests/integration/query/simple/with_group_filter_test.go +++ b/tests/integration/query/simple/with_group_filter_test.go @@ -56,7 +56,7 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberFilter(t *testing.T) { "Name": "John", "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, }, }, @@ -64,7 +64,7 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberFilter(t *testing.T) { "Name": "Carlo", "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -110,7 +110,7 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithParentFilter(t *testing. "Name": "John", "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, }, }, @@ -118,7 +118,7 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithParentFilter(t *testing. "Name": "Carlo", "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -292,7 +292,7 @@ func TestQuerySimpleWithGroupByStringWithMultipleGroupNumberFilter(t *testing.T) "G1": []map[string]any{}, "G2": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -300,12 +300,12 @@ func TestQuerySimpleWithGroupByStringWithMultipleGroupNumberFilter(t *testing.T) "Name": "John", "G1": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, }, "G2": []map[string]any{ { - "Age": uint64(25), + "Age": int64(25), }, }, }, @@ -313,7 +313,7 @@ func TestQuerySimpleWithGroupByStringWithMultipleGroupNumberFilter(t *testing.T) "Name": "Carlo", "G1": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, "G2": []map[string]any{}, diff --git a/tests/integration/query/simple/with_group_limit_offset_test.go b/tests/integration/query/simple/with_group_limit_offset_test.go index cabcb6ff76..0fea317d5a 100644 --- a/tests/integration/query/simple/with_group_limit_offset_test.go +++ b/tests/integration/query/simple/with_group_limit_offset_test.go @@ -45,7 +45,7 @@ func TestQuerySimpleWithGroupByNumberWithGroupLimitAndOffset(t *testing.T) { }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_group": []map[string]any{ { "Name": "John", @@ -53,7 +53,7 @@ func TestQuerySimpleWithGroupByNumberWithGroupLimitAndOffset(t *testing.T) { }, }, { - "Age": uint64(19), + "Age": int64(19), "_group": []map[string]any{}, }, }, @@ -91,7 +91,7 @@ func TestQuerySimpleWithGroupByNumberWithLimitAndOffsetAndWithGroupLimitAndOffse }, Results: []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), "_group": []map[string]any{}, }, }, diff --git a/tests/integration/query/simple/with_group_limit_test.go b/tests/integration/query/simple/with_group_limit_test.go index 03f1ce8682..b5b53c3b81 100644 --- a/tests/integration/query/simple/with_group_limit_test.go +++ b/tests/integration/query/simple/with_group_limit_test.go @@ -45,7 +45,7 @@ func TestQuerySimpleWithGroupByNumberWithGroupLimit(t *testing.T) { }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_group": []map[string]any{ { "Name": "Bob", @@ -53,7 +53,7 @@ func TestQuerySimpleWithGroupByNumberWithGroupLimit(t *testing.T) { }, }, { - "Age": uint64(19), + 
"Age": int64(19), "_group": []map[string]any{ { "Name": "Alice", @@ -98,7 +98,7 @@ func TestQuerySimpleWithGroupByNumberWithMultipleGroupsWithDifferentLimits(t *te }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "G1": []map[string]any{ { "Name": "Bob", @@ -114,7 +114,7 @@ func TestQuerySimpleWithGroupByNumberWithMultipleGroupsWithDifferentLimits(t *te }, }, { - "Age": uint64(19), + "Age": int64(19), "G1": []map[string]any{ { "Name": "Alice", @@ -161,7 +161,7 @@ func TestQuerySimpleWithGroupByNumberWithLimitAndGroupWithHigherLimit(t *testing }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_group": []map[string]any{ { "Name": "Bob", @@ -210,7 +210,7 @@ func TestQuerySimpleWithGroupByNumberWithLimitAndGroupWithLowerLimit(t *testing. }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_group": []map[string]any{ { "Name": "Bob", @@ -218,7 +218,7 @@ func TestQuerySimpleWithGroupByNumberWithLimitAndGroupWithLowerLimit(t *testing. }, }, { - "Age": uint64(42), + "Age": int64(42), "_group": []map[string]any{ { "Name": "Alice", diff --git a/tests/integration/query/simple/with_group_order_test.go b/tests/integration/query/simple/with_group_order_test.go index aeb7a7ad50..ffe52ca7c1 100644 --- a/tests/integration/query/simple/with_group_order_test.go +++ b/tests/integration/query/simple/with_group_order_test.go @@ -52,7 +52,7 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrder(t *testing.T) "Name": "Alice", "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -60,10 +60,10 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrder(t *testing.T) "Name": "John", "_group": []map[string]any{ { - "Age": uint64(25), + "Age": int64(25), }, { - "Age": uint64(32), + "Age": int64(32), }, }, }, @@ -71,7 +71,7 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrder(t *testing.T) "Name": "Carlo", "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -117,7 +117,7 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrderDescending(t * "Name": "Alice", "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -125,7 +125,7 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrderDescending(t * "Name": "Carlo", "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -133,10 +133,10 @@ func TestQuerySimpleWithGroupByStringWithGroupNumberWithGroupOrderDescending(t * "Name": "John", "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, { - "Age": uint64(25), + "Age": int64(25), }, }, }, @@ -182,10 +182,10 @@ func TestQuerySimpleWithGroupByStringAndOrderDescendingWithGroupNumberWithGroupO "Name": "John", "_group": []map[string]any{ { - "Age": uint64(25), + "Age": int64(25), }, { - "Age": uint64(32), + "Age": int64(32), }, }, }, @@ -193,7 +193,7 @@ func TestQuerySimpleWithGroupByStringAndOrderDescendingWithGroupNumberWithGroupO "Name": "Carlo", "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -201,7 +201,7 @@ func TestQuerySimpleWithGroupByStringAndOrderDescendingWithGroupNumberWithGroupO "Name": "Alice", "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -262,10 +262,10 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanThenInnerOrderDescendi "Verified": true, "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, { - "Age": uint64(25), + "Age": int64(25), }, }, 
}, @@ -273,7 +273,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanThenInnerOrderDescendi "Verified": false, "_group": []map[string]any{ { - "Age": uint64(34), + "Age": int64(34), }, }, }, @@ -286,7 +286,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanThenInnerOrderDescendi "Verified": true, "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -299,7 +299,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanThenInnerOrderDescendi "Verified": false, "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -364,10 +364,10 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndOrderAscendingThenI "Verified": false, "_group": []map[string]any{ { - "Age": uint64(34), + "Age": int64(34), }, { - "Age": uint64(25), + "Age": int64(25), }, }, }, @@ -375,7 +375,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndOrderAscendingThenI "Verified": true, "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, }, }, @@ -388,7 +388,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndOrderAscendingThenI "Verified": false, "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -401,7 +401,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndOrderAscendingThenI "Verified": true, "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, diff --git a/tests/integration/query/simple/with_group_sum_filter_test.go b/tests/integration/query/simple/with_group_sum_filter_test.go index 73bc66377f..15d6a89acb 100644 --- a/tests/integration/query/simple/with_group_sum_filter_test.go +++ b/tests/integration/query/simple/with_group_sum_filter_test.go @@ -43,11 +43,11 @@ func TestQuerySimpleWithGroupByNumberWithoutRenderedGroupAndChildSumWithFilter(t }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_sum": int64(64), }, { - "Age": uint64(19), + "Age": int64(19), "_sum": int64(0), }, }, @@ -86,7 +86,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupAndChildSumWithFilter(t *t }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_sum": int64(64), "_group": []map[string]any{ { @@ -98,7 +98,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupAndChildSumWithFilter(t *t }, }, { - "Age": uint64(19), + "Age": int64(19), "_sum": int64(0), "_group": []map[string]any{ { @@ -142,7 +142,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithFilterAndChildSumWithM }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_sum": int64(32), "_group": []map[string]any{ { @@ -151,7 +151,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithFilterAndChildSumWithM }, }, { - "Age": uint64(19), + "Age": int64(19), "_sum": int64(0), "_group": []map[string]any{}, }, @@ -191,7 +191,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithFilterAndChildSumWithD }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_sum": int64(64), "_group": []map[string]any{ { @@ -200,7 +200,7 @@ func TestQuerySimpleWithGroupByNumberWithRenderedGroupWithFilterAndChildSumWithD }, }, { - "Age": uint64(19), + "Age": int64(19), "_sum": int64(0), "_group": []map[string]any{}, }, @@ -238,12 +238,12 @@ func TestQuerySimpleWithGroupByNumberWithoutRenderedGroupAndChildSumsWithDiffere }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "S1": int64(64), "S2": int64(0), }, { - "Age": uint64(19), + "Age": int64(19), "S1": int64(0), 
"S2": int64(19), }, diff --git a/tests/integration/query/simple/with_group_sum_test.go b/tests/integration/query/simple/with_group_sum_test.go index 18b2190526..9391ef7d5f 100644 --- a/tests/integration/query/simple/with_group_sum_test.go +++ b/tests/integration/query/simple/with_group_sum_test.go @@ -441,7 +441,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfSumOfSumOfFloa "_sum": float64(2.22), "_group": []map[string]any{ { - "Age": uint64(34), + "Age": int64(34), "_sum": float64(2.22), }, }, @@ -451,11 +451,11 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfSumOfSumOfFloa "_sum": float64(3.43), "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_sum": float64(1.61), }, { - "Age": uint64(25), + "Age": int64(25), "_sum": float64(1.82), }, }, @@ -471,7 +471,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfSumOfSumOfFloa "_sum": float64(2.04), "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), "_sum": float64(2.04), }, }, @@ -487,7 +487,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBooleanAndSumOfSumOfSumOfFloa "_sum": float64(1.74), "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), "_sum": float64(1.74), }, }, diff --git a/tests/integration/query/simple/with_group_test.go b/tests/integration/query/simple/with_group_test.go index c926f580a6..3fae88b1ef 100644 --- a/tests/integration/query/simple/with_group_test.go +++ b/tests/integration/query/simple/with_group_test.go @@ -85,13 +85,13 @@ func TestQuerySimpleWithGroupByNumber(t *testing.T) { }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, { - "Age": uint64(19), + "Age": int64(19), }, { - "Age": uint64(55), + "Age": int64(55), }, }, } @@ -176,7 +176,7 @@ func TestQuerySimpleWithGroupByNumberWithGroupString(t *testing.T) { }, Results: []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), "_group": []map[string]any{ { "Name": "Bob", @@ -187,7 +187,7 @@ func TestQuerySimpleWithGroupByNumberWithGroupString(t *testing.T) { }, }, { - "Age": uint64(19), + "Age": int64(19), "_group": []map[string]any{ { "Name": "Alice", @@ -195,7 +195,7 @@ func TestQuerySimpleWithGroupByNumberWithGroupString(t *testing.T) { }, }, { - "Age": uint64(55), + "Age": int64(55), "_group": []map[string]any{ { "Name": "Carlo", @@ -244,7 +244,7 @@ func TestQuerySimpleWithGroupByWithoutGroupedFieldSelectedWithInnerGroup(t *test "Name": "Alice", "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -252,10 +252,10 @@ func TestQuerySimpleWithGroupByWithoutGroupedFieldSelectedWithInnerGroup(t *test "Name": "John", "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, { - "Age": uint64(25), + "Age": int64(25), }, }, }, @@ -263,7 +263,7 @@ func TestQuerySimpleWithGroupByWithoutGroupedFieldSelectedWithInnerGroup(t *test "Name": "Carlo", "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -309,7 +309,7 @@ func TestQuerySimpleWithGroupByString(t *testing.T) { "Name": "Alice", "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -317,10 +317,10 @@ func TestQuerySimpleWithGroupByString(t *testing.T) { "Name": "John", "_group": []map[string]any{ { - "Age": uint64(32), + "Age": int64(32), }, { - "Age": uint64(25), + "Age": int64(25), }, }, }, @@ -328,7 +328,7 @@ func TestQuerySimpleWithGroupByString(t *testing.T) { "Name": "Carlo", "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, 
@@ -389,10 +389,10 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBoolean(t *testing.T) { "Verified": true, "_group": []map[string]any{ { - "Age": uint64(25), + "Age": int64(25), }, { - "Age": uint64(32), + "Age": int64(32), }, }, }, @@ -400,7 +400,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBoolean(t *testing.T) { "Verified": false, "_group": []map[string]any{ { - "Age": uint64(34), + "Age": int64(34), }, }, }, @@ -413,7 +413,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBoolean(t *testing.T) { "Verified": false, "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -426,7 +426,7 @@ func TestQuerySimpleWithGroupByStringWithInnerGroupBoolean(t *testing.T) { "Verified": true, "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -485,10 +485,10 @@ func TestQuerySimpleWithGroupByStringThenBoolean(t *testing.T) { "Verified": true, "_group": []map[string]any{ { - "Age": uint64(25), + "Age": int64(25), }, { - "Age": uint64(32), + "Age": int64(32), }, }, }, @@ -497,7 +497,7 @@ func TestQuerySimpleWithGroupByStringThenBoolean(t *testing.T) { "Verified": false, "_group": []map[string]any{ { - "Age": uint64(34), + "Age": int64(34), }, }, }, @@ -506,7 +506,7 @@ func TestQuerySimpleWithGroupByStringThenBoolean(t *testing.T) { "Verified": false, "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -515,7 +515,7 @@ func TestQuerySimpleWithGroupByStringThenBoolean(t *testing.T) { "Verified": true, "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -572,10 +572,10 @@ func TestQuerySimpleWithGroupByBooleanThenNumber(t *testing.T) { "Verified": true, "_group": []map[string]any{ { - "Age": uint64(25), + "Age": int64(25), }, { - "Age": uint64(32), + "Age": int64(32), }, }, }, @@ -584,7 +584,7 @@ func TestQuerySimpleWithGroupByBooleanThenNumber(t *testing.T) { "Verified": false, "_group": []map[string]any{ { - "Age": uint64(34), + "Age": int64(34), }, }, }, @@ -593,7 +593,7 @@ func TestQuerySimpleWithGroupByBooleanThenNumber(t *testing.T) { "Verified": false, "_group": []map[string]any{ { - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -602,7 +602,7 @@ func TestQuerySimpleWithGroupByBooleanThenNumber(t *testing.T) { "Verified": true, "_group": []map[string]any{ { - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -639,7 +639,7 @@ func TestQuerySimpleWithGroupByNumberOnUndefined(t *testing.T) { "Age": nil, }, { - "Age": uint64(32), + "Age": int64(32), }, }, } @@ -685,7 +685,7 @@ func TestQuerySimpleWithGroupByNumberOnUndefinedWithChildren(t *testing.T) { }, }, { - "Age": uint64(32), + "Age": int64(32), "_group": []map[string]any{ { "Name": "John", diff --git a/tests/integration/query/simple/with_key_test.go b/tests/integration/query/simple/with_key_test.go index 556cabe757..f6854da643 100644 --- a/tests/integration/query/simple/with_key_test.go +++ b/tests/integration/query/simple/with_key_test.go @@ -40,7 +40,7 @@ func TestQuerySimpleWithKeyFilterBlock(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, } diff --git a/tests/integration/query/simple/with_limit_offset_test.go b/tests/integration/query/simple/with_limit_offset_test.go index f6701d2030..13683414c7 100644 --- a/tests/integration/query/simple/with_limit_offset_test.go +++ b/tests/integration/query/simple/with_limit_offset_test.go @@ -74,7 +74,7 @@ func TestQuerySimpleWithLimit(t *testing.T) { Results: []map[string]any{ { "Name": "Bob", - 
"Age": uint64(32), + "Age": int64(32), }, }, }, @@ -109,11 +109,11 @@ func TestQuerySimpleWithLimit(t *testing.T) { Results: []map[string]any{ { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, }, }, @@ -149,7 +149,7 @@ func TestQuerySimpleWithLimitAndOffset(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -184,11 +184,11 @@ func TestQuerySimpleWithLimitAndOffset(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, }, }, @@ -224,7 +224,7 @@ func TestQuerySimpleWithOffset(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, }, }, @@ -263,15 +263,15 @@ func TestQuerySimpleWithOffset(t *testing.T) { Results: []map[string]any{ { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, }, }, diff --git a/tests/integration/query/simple/with_order_filter_test.go b/tests/integration/query/simple/with_order_filter_test.go index c0775bc846..d65a197f2c 100644 --- a/tests/integration/query/simple/with_order_filter_test.go +++ b/tests/integration/query/simple/with_order_filter_test.go @@ -48,11 +48,11 @@ func TestQuerySimpleWithNumericGreaterThanFilterAndNumericOrderDescending(t *tes Results: []map[string]any{ { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, }, } diff --git a/tests/integration/query/simple/with_order_test.go b/tests/integration/query/simple/with_order_test.go index ae7e6c865f..0936feccb1 100644 --- a/tests/integration/query/simple/with_order_test.go +++ b/tests/integration/query/simple/with_order_test.go @@ -43,15 +43,15 @@ func TestQuerySimpleWithEmptyOrder(t *testing.T) { Results: []map[string]any{ { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, }, } @@ -91,19 +91,19 @@ func TestQuerySimpleWithNumericOrderAscending(t *testing.T) { Results: []map[string]any{ { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, }, } @@ -147,19 +147,19 @@ func TestQuerySimpleWithDateTimeOrderAscending(t *testing.T) { Results: []map[string]any{ { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, }, } @@ -199,19 +199,19 @@ func TestQuerySimpleWithNumericOrderDescending(t *testing.T) { Results: []map[string]any{ { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, { "Name": "Bob", - "Age": uint64(32), + "Age": int64(32), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, }, } @@ -255,19 +255,19 @@ func TestQuerySimpleWithDateTimeOrderDescending(t *testing.T) { Results: []map[string]any{ { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), }, { "Name": "Bob", - "Age": uint64(32), + "Age": 
int64(32), }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), }, { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), }, }, } @@ -312,22 +312,22 @@ func TestQuerySimpleWithNumericOrderDescendingAndBooleanOrderAscending(t *testin Results: []map[string]any{ { "Name": "Carlo", - "Age": uint64(55), + "Age": int64(55), "Verified": true, }, { "Name": "Bob", - "Age": uint64(21), + "Age": int64(21), "Verified": false, }, { "Name": "John", - "Age": uint64(21), + "Age": int64(21), "Verified": true, }, { "Name": "Alice", - "Age": uint64(19), + "Age": int64(19), "Verified": false, }, }, diff --git a/tests/integration/query/simple/with_restart_test.go b/tests/integration/query/simple/with_restart_test.go index 34b906a4bc..0991f55c23 100644 --- a/tests/integration/query/simple/with_restart_test.go +++ b/tests/integration/query/simple/with_restart_test.go @@ -46,7 +46,7 @@ func TestQuerySimpleWithRestart(t *testing.T) { Results: []map[string]any{ { "name": "Shahzad", - "age": uint64(30), + "age": int64(30), }, }, }, diff --git a/tests/integration/query/simple/with_version_test.go b/tests/integration/query/simple/with_version_test.go index 900ac48b40..868d3b54af 100644 --- a/tests/integration/query/simple/with_version_test.go +++ b/tests/integration/query/simple/with_version_test.go @@ -43,17 +43,17 @@ func TestQuerySimpleWithEmbeddedLatestCommit(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + "cid": "bafybeieybepwqpy5h2d4sywksgvdqpjd44ciu223vrm7knumychpmucawy", "links": []map[string]any{ { - "cid": "bafybeieqi3u6kdbsb76qrfziiiabs52ztptecry34lo46cwfbqmf3u4kwi", + "cid": "bafybeibphw52n3t5nn2xn32sfdsf4hbll3iddsc6or2ebnnrmpz2cbovyy", "name": "Age", }, { - "cid": "bafybeidoaqrpud2z2d4jnjqqmo3kn5rakr7yh2d2cdmjkvk5fcisy54jam", + "cid": "bafybeifgqmrklbyw3x35zzzao3d7baownrv3z4v7vzfbmk2r5omv5icgu4", "name": "Name", }, }, @@ -90,7 +90,7 @@ func TestQuerySimpleWithEmbeddedLatestCommitWithSchemaVersionId(t *testing.T) { "Name": "John", "_version": []map[string]any{ { - "schemaVersionId": "bafkreicl3pjcorfcaexxmqcrilkhx7xl37o6b34nxgtiauygtl7hrqbhoq", + "schemaVersionId": "bafkreicqyapc7zxw5tt2ymybau5m54lhmm5ahrl22oaktnhidul757a4ba", }, }, }, @@ -168,17 +168,17 @@ func TestQuerySimpleWithMultipleAliasedEmbeddedLatestCommit(t *testing.T) { Results: []map[string]any{ { "Name": "John", - "Age": uint64(21), + "Age": int64(21), "_version": []map[string]any{ { - "cid": "bafybeicloiyf5zl5k54cjuhfg6rpsj7rnhxswvnuizpagd2kwq4px6aqn4", + "cid": "bafybeieybepwqpy5h2d4sywksgvdqpjd44ciu223vrm7knumychpmucawy", "L1": []map[string]any{ { - "cid": "bafybeieqi3u6kdbsb76qrfziiiabs52ztptecry34lo46cwfbqmf3u4kwi", + "cid": "bafybeibphw52n3t5nn2xn32sfdsf4hbll3iddsc6or2ebnnrmpz2cbovyy", "name": "Age", }, { - "cid": "bafybeidoaqrpud2z2d4jnjqqmo3kn5rakr7yh2d2cdmjkvk5fcisy54jam", + "cid": "bafybeifgqmrklbyw3x35zzzao3d7baownrv3z4v7vzfbmk2r5omv5icgu4", "name": "Name", }, }, diff --git a/tests/integration/results.go b/tests/integration/results.go index 052de310c5..35a2249c0b 100644 --- a/tests/integration/results.go +++ b/tests/integration/results.go @@ -28,7 +28,7 @@ type AnyOf []any // The comparison is relaxed when using client types other than goClientType. 
func assertResultsAnyOf(t *testing.T, client ClientType, expected AnyOf, actual any, msgAndArgs ...any) { switch client { - case httpClientType: + case HTTPClientType, CLIClientType: if !areResultsAnyOf(expected, actual) { assert.Contains(t, expected, actual, msgAndArgs...) } @@ -42,7 +42,7 @@ func assertResultsAnyOf(t *testing.T, client ClientType, expected AnyOf, actual // The comparison is relaxed when using client types other than goClientType. func assertResultsEqual(t *testing.T, client ClientType, expected any, actual any, msgAndArgs ...any) { switch client { - case httpClientType: + case HTTPClientType, CLIClientType: if !areResultsEqual(expected, actual) { assert.EqualValues(t, expected, actual, msgAndArgs...) } diff --git a/tests/integration/schema/get_schema_test.go b/tests/integration/schema/get_schema_test.go new file mode 100644 index 0000000000..e6d5f166ac --- /dev/null +++ b/tests/integration/schema/get_schema_test.go @@ -0,0 +1,274 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package schema + +import ( + "testing" + + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestGetSchema_GivenNonExistantSchemaVersionID_Errors(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.GetSchema{ + VersionID: immutable.Some("does not exist"), + ExpectedError: "datastore: key not found", + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestGetSchema_GivenNoSchemaReturnsEmptySet(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.GetSchema{ + ExpectedResults: []client.SchemaDescription{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestGetSchema_GivenNoSchemaGivenUnknownRoot(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.GetSchema{ + Root: immutable.Some("does not exist"), + ExpectedResults: []client.SchemaDescription{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestGetSchema_GivenNoSchemaGivenUnknownName(t *testing.T) { + test := testUtils.TestCase{ + Actions: []any{ + testUtils.GetSchema{ + Name: immutable.Some("does not exist"), + ExpectedResults: []client.SchemaDescription{}, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestGetSchema_ReturnsAllSchema(t *testing.T) { + usersSchemaVersion1ID := "bafkreickgf3nbjaairxkkqawmrv7fafaafyccl4qygqeveagisdn42eohu" + usersSchemaVersion2ID := "bafkreicseqwxooxo2wf2bgzdalwtm2rtsj7x4mgsir4rp4htmpnwnffwre" + booksSchemaVersion1ID := "bafkreigbfibfn7g6neen2gghc54dzocexefi7vshc3opgvy6j7jflar2nm" + + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users {} + `, + }, + testUtils.SchemaUpdate{ + Schema: ` + type Books {} + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": "String"} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.GetSchema{ + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + Root: usersSchemaVersion1ID, + VersionID: usersSchemaVersion1ID, + Fields: []client.FieldDescription{ + { + 
Name: "_key", + Kind: client.FieldKind_DocKey, + }, + }, + }, + { + Name: "Users", + Root: usersSchemaVersion1ID, + VersionID: usersSchemaVersion2ID, + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + ID: 1, + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + { + Name: "Books", + Root: booksSchemaVersion1ID, + VersionID: booksSchemaVersion1ID, + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestGetSchema_ReturnsSchemaForGivenRoot(t *testing.T) { + usersSchemaVersion1ID := "bafkreickgf3nbjaairxkkqawmrv7fafaafyccl4qygqeveagisdn42eohu" + usersSchemaVersion2ID := "bafkreicseqwxooxo2wf2bgzdalwtm2rtsj7x4mgsir4rp4htmpnwnffwre" + + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users {} + `, + }, + testUtils.SchemaUpdate{ + Schema: ` + type Books {} + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": "String"} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.GetSchema{ + Root: immutable.Some(usersSchemaVersion1ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + Root: usersSchemaVersion1ID, + VersionID: usersSchemaVersion1ID, + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + }, + }, + { + Name: "Users", + Root: usersSchemaVersion1ID, + VersionID: usersSchemaVersion2ID, + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + ID: 1, + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestGetSchema_ReturnsSchemaForGivenName(t *testing.T) { + usersSchemaVersion1ID := "bafkreickgf3nbjaairxkkqawmrv7fafaafyccl4qygqeveagisdn42eohu" + usersSchemaVersion2ID := "bafkreicseqwxooxo2wf2bgzdalwtm2rtsj7x4mgsir4rp4htmpnwnffwre" + + test := testUtils.TestCase{ + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users {} + `, + }, + testUtils.SchemaUpdate{ + Schema: ` + type Books {} + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": "String"} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.GetSchema{ + Name: immutable.Some("Users"), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + Root: usersSchemaVersion1ID, + VersionID: usersSchemaVersion1ID, + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + }, + }, + { + Name: "Users", + Root: usersSchemaVersion1ID, + VersionID: usersSchemaVersion2ID, + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + ID: 1, + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/query/simple_test.go b/tests/integration/schema/migrations/query/simple_test.go index 56f94b2e6b..a13fd32be9 100644 --- a/tests/integration/schema/migrations/query/simple_test.go +++ b/tests/integration/schema/migrations/query/simple_test.go @@ -39,14 +39,14 @@ func TestSchemaMigrationQuery(t 
*testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -109,14 +109,14 @@ func TestSchemaMigrationQueryMultipleDocs(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -178,8 +178,8 @@ func TestSchemaMigrationQueryWithMigrationRegisteredBeforeSchemaPatch(t *testing }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -196,7 +196,7 @@ func TestSchemaMigrationQueryWithMigrationRegisteredBeforeSchemaPatch(t *testing testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -239,14 +239,14 @@ func TestSchemaMigrationQueryMigratesToIntermediaryVersion(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, @@ -254,8 +254,8 @@ func TestSchemaMigrationQueryMigratesToIntermediaryVersion(t *testing.T) { // Register a migration from schema version 1 to schema version 2 **only** - // there should be no migration from version 2 to version 3. 
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -310,14 +310,14 @@ func TestSchemaMigrationQueryMigratesFromIntermediaryVersion(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, @@ -325,8 +325,8 @@ func TestSchemaMigrationQueryMigratesFromIntermediaryVersion(t *testing.T) { // Register a migration from schema version 2 to schema version 3 **only** - // there should be no migration from version 1 to version 2. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", - DestinationSchemaVersionID: "bafkreiadb2rps7a2zykywfxwfpgkvet5vmzaig4nvzl5sgfqquzr3qrvsq", + SourceSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", + DestinationSchemaVersionID: "bafkreigrpkox3omi3c3sp5zoupcjg2b32mysztjozaqsceafsdtkadzufe", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -381,21 +381,21 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -411,8 +411,8 @@ func TestSchemaMigrationQueryMigratesAcrossMultipleVersions(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", - DestinationSchemaVersionID: "bafkreiadb2rps7a2zykywfxwfpgkvet5vmzaig4nvzl5sgfqquzr3qrvsq", + SourceSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", + DestinationSchemaVersionID: "bafkreigrpkox3omi3c3sp5zoupcjg2b32mysztjozaqsceafsdtkadzufe", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -473,7 +473,7 @@ func TestSchemaMigrationQueryWithUnknownSchemaMigration(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": 
"/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -533,14 +533,14 @@ func TestSchemaMigrationQueryMigrationMutatesExistingScalarField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -594,14 +594,14 @@ func TestSchemaMigrationQueryMigrationMutatesExistingInlineArrayField(t *testing testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreic427cayffkscmp2ng224wpmsryzwz5aec6dhbfr2xoljb4xbugji", - DestinationSchemaVersionID: "bafkreidrmuahiz4qenylm247udlro732ip3adwv3dqpeds3s2kghwtfvt4", + SourceSchemaVersionID: "bafkreidvca2kcxlxab2wi25xhiyxmug66css4cqzqqxu4rdyuanl7u5rde", + DestinationSchemaVersionID: "bafkreiedmg3qox3a24rkhkx3wahahpyixlxkicetsk3ctkh3f7xcbdrrli", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -657,14 +657,14 @@ func TestSchemaMigrationQueryMigrationRemovesExistingField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", - DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + SourceSchemaVersionID: "bafkreiggbvwwiqmzid4qnklwwdyu7mwhbbjy3ejss3x7uw7zxw6ivmmj6u", + DestinationSchemaVersionID: "bafkreiat3mfdsoknsavvw3wbir4atbaswqbnnitn3ysswqih2g4zwbn62a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -718,14 +718,14 @@ func TestSchemaMigrationQueryMigrationPreservesExistingFieldWhenFieldNotRequeste testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", - DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + SourceSchemaVersionID: "bafkreiggbvwwiqmzid4qnklwwdyu7mwhbbjy3ejss3x7uw7zxw6ivmmj6u", + DestinationSchemaVersionID: "bafkreiat3mfdsoknsavvw3wbir4atbaswqbnnitn3ysswqih2g4zwbn62a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -761,7 +761,7 @@ func TestSchemaMigrationQueryMigrationPreservesExistingFieldWhenFieldNotRequeste Results: []map[string]any{ { "name": "Fred", - "age": uint64(40), + "age": int64(40), }, }, }, @@ -792,14 +792,14 @@ func 
TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcFieldNotRequeste testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", - DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + SourceSchemaVersionID: "bafkreiggbvwwiqmzid4qnklwwdyu7mwhbbjy3ejss3x7uw7zxw6ivmmj6u", + DestinationSchemaVersionID: "bafkreiat3mfdsoknsavvw3wbir4atbaswqbnnitn3ysswqih2g4zwbn62a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -823,7 +823,7 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcFieldNotRequeste Results: []map[string]any{ { "name": "John", - "yearsLived": uint64(40), + "yearsLived": int64(40), }, }, }, @@ -854,14 +854,14 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcAndDstFieldNotRe testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "yearsLived", "Kind": "Int"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreidovoxkxttybaew2qraoelormm63ilutzms7wlwmcr3xru44hfnta", - DestinationSchemaVersionID: "bafkreia4bbxhtqwzw4smby5xsqxv6ptoc6ijc6v3lmnlv66twpfak5gxxq", + SourceSchemaVersionID: "bafkreiggbvwwiqmzid4qnklwwdyu7mwhbbjy3ejss3x7uw7zxw6ivmmj6u", + DestinationSchemaVersionID: "bafkreiat3mfdsoknsavvw3wbir4atbaswqbnnitn3ysswqih2g4zwbn62a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -898,8 +898,8 @@ func TestSchemaMigrationQueryMigrationCopiesExistingFieldWhenSrcAndDstFieldNotRe Results: []map[string]any{ { "name": "John", - "age": uint64(40), - "yearsLived": uint64(40), + "age": int64(40), + "yearsLived": int64(40), }, }, }, diff --git a/tests/integration/schema/migrations/query/with_dockey_test.go b/tests/integration/schema/migrations/query/with_dockey_test.go index db58c9f066..a3a983d8bb 100644 --- a/tests/integration/schema/migrations/query/with_dockey_test.go +++ b/tests/integration/schema/migrations/query/with_dockey_test.go @@ -46,14 +46,14 @@ func TestSchemaMigrationQueryByDocKey(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -152,14 +152,14 @@ func TestSchemaMigrationQueryMultipleQueriesByDocKey(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - 
SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_p2p_test.go b/tests/integration/schema/migrations/query/with_p2p_test.go index 7543b3275a..0fc5d2da79 100644 --- a/tests/integration/schema/migrations/query/with_p2p_test.go +++ b/tests/integration/schema/migrations/query/with_p2p_test.go @@ -39,15 +39,15 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtOlderSchemaVersion(t *testing NodeID: immutable.Some(1), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, testUtils.ConfigureMigration{ // Register the migration on both nodes. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm", - DestinationSchemaVersionID: "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e", + SourceSchemaVersionID: "bafkreiguj3z5egyieou3li6aeyhabgtpk4mtl6nr7jjmyoihc6dmdg6vbu", + DestinationSchemaVersionID: "bafkreidj4ipbeqhqn7at7du4vhzk3aw4xswbwccwqhbcab6avlgdeu6w2a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -129,15 +129,15 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtNewerSchemaVersion(t *testing NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, testUtils.ConfigureMigration{ // Register the migration on both nodes. LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreifmgqtwpvepenteuvj27u4ewix6nb7ypvyz6j555wsk5u2n7hrldm", - DestinationSchemaVersionID: "bafkreigfqdqnj5dunwgcsf2a6ht6q6m2yv3ys6byw5ifsmi5lfcpeh5t7e", + SourceSchemaVersionID: "bafkreiguj3z5egyieou3li6aeyhabgtpk4mtl6nr7jjmyoihc6dmdg6vbu", + DestinationSchemaVersionID: "bafkreidj4ipbeqhqn7at7du4vhzk3aw4xswbwccwqhbcab6avlgdeu6w2a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -221,7 +221,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchNewerSchemaVersionWithSch NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -230,7 +230,7 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchNewerSchemaVersionWithSch NodeID: immutable.Some(0), Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } ] `, }, @@ -238,8 +238,8 @@ func TestSchemaMigrationQueryWithP2PReplicatedDocAtMuchNewerSchemaVersionWithSch // Register a migration from version 2 to version 3 on both nodes. // There is no migration from version 1 to 2, thus node 1 has no knowledge of schema version 2. 
LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", - DestinationSchemaVersionID: "bafkreiadb2rps7a2zykywfxwfpgkvet5vmzaig4nvzl5sgfqquzr3qrvsq", + SourceSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", + DestinationSchemaVersionID: "bafkreigrpkox3omi3c3sp5zoupcjg2b32mysztjozaqsceafsdtkadzufe", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_restart_test.go b/tests/integration/schema/migrations/query/with_restart_test.go index 2c1253bfd0..deac59c725 100644 --- a/tests/integration/schema/migrations/query/with_restart_test.go +++ b/tests/integration/schema/migrations/query/with_restart_test.go @@ -39,14 +39,14 @@ func TestSchemaMigrationQueryWithRestart(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_set_default_test.go b/tests/integration/schema/migrations/query/with_set_default_test.go new file mode 100644 index 0000000000..9d61d609e7 --- /dev/null +++ b/tests/integration/schema/migrations/query/with_set_default_test.go @@ -0,0 +1,236 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. 
+ +package query + +import ( + "testing" + + "github.com/lens-vm/lens/host-go/config/model" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" + testUtils "github.com/sourcenetwork/defradb/tests/integration" + "github.com/sourcenetwork/defradb/tests/lenses" +) + +func TestSchemaMigrationQuery_WithSetDefaultToLatest_AppliesForwardMigration(t *testing.T) { + schemaVersionID2 := "bafkreidj4ipbeqhqn7at7du4vhzk3aw4xswbwccwqhbcab6avlgdeu6w2a" + + test := testUtils.TestCase{ + Description: "Test schema migration", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + verified: Boolean + } + `, + }, + testUtils.CreateDoc{ + Doc: `{ + "name": "John" + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: "bafkreiguj3z5egyieou3li6aeyhabgtpk4mtl6nr7jjmyoihc6dmdg6vbu", + DestinationSchemaVersionID: schemaVersionID2, + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: schemaVersionID2, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + "verified": true, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQuery_WithSetDefaultToOriginal_AppliesInverseMigration(t *testing.T) { + schemaVersionID1 := "bafkreiguj3z5egyieou3li6aeyhabgtpk4mtl6nr7jjmyoihc6dmdg6vbu" + schemaVersionID2 := "bafkreidj4ipbeqhqn7at7du4vhzk3aw4xswbwccwqhbcab6avlgdeu6w2a" + + test := testUtils.TestCase{ + Description: "Test schema migration", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + verified: Boolean + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: schemaVersionID2, + }, + // Create John using the new schema version + testUtils.CreateDoc{ + Doc: `{ + "name": "John", + "verified": true + }`, + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: schemaVersionID1, + DestinationSchemaVersionID: schemaVersionID2, + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + // Set the schema version back to the original + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: schemaVersionID1, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + // The inverse lens migration has been applied, clearing the verified field + "verified": nil, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaMigrationQuery_WithSetDefaultToOriginalVersionThatDocWasCreatedAt_ClearsMigrations(t *testing.T) { + schemaVersionID1 := "bafkreiguj3z5egyieou3li6aeyhabgtpk4mtl6nr7jjmyoihc6dmdg6vbu" + schemaVersionID2 := "bafkreidj4ipbeqhqn7at7du4vhzk3aw4xswbwccwqhbcab6avlgdeu6w2a" + + test := testUtils.TestCase{ + Description: 
"Test schema migration", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + verified: Boolean + } + `, + }, + // Create John using the original schema version + testUtils.CreateDoc{ + Doc: `{ + "name": "John", + "verified": false + }`, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": "String"} } + ] + `, + SetAsDefaultVersion: immutable.Some(true), + }, + testUtils.ConfigureMigration{ + LensConfig: client.LensConfig{ + SourceSchemaVersionID: schemaVersionID1, + DestinationSchemaVersionID: schemaVersionID2, + Lens: model.Lens{ + Lenses: []model.LensModule{ + { + Path: lenses.SetDefaultModulePath, + Arguments: map[string]any{ + "dst": "verified", + "value": true, + }, + }, + }, + }, + }, + }, + // Set the schema version back to the original + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: schemaVersionID1, + }, + testUtils.Request{ + Request: `query { + Users { + name + verified + } + }`, + Results: []map[string]any{ + { + "name": "John", + // The inverse lens migration has not been applied, the document is returned as it was defined + "verified": false, + }, + }, + }, + }, + } + + testUtils.ExecuteTestCase(t, test) +} diff --git a/tests/integration/schema/migrations/query/with_txn_test.go b/tests/integration/schema/migrations/query/with_txn_test.go index 3c55fd7748..fcd01d6748 100644 --- a/tests/integration/schema/migrations/query/with_txn_test.go +++ b/tests/integration/schema/migrations/query/with_txn_test.go @@ -40,15 +40,15 @@ func TestSchemaMigrationQueryWithTxn(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ TransactionID: immutable.Some(0), LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -102,15 +102,15 @@ func TestSchemaMigrationQueryWithTxnAndCommit(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ TransactionID: immutable.Some(0), LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/query/with_update_test.go b/tests/integration/schema/migrations/query/with_update_test.go index 35c6965ead..478ffd8e24 100644 --- a/tests/integration/schema/migrations/query/with_update_test.go +++ b/tests/integration/schema/migrations/query/with_update_test.go @@ -39,14 +39,14 @@ func 
TestSchemaMigrationQueryWithUpdateRequest(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -117,7 +117,7 @@ func TestSchemaMigrationQueryWithMigrationRegisteredAfterUpdate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "verified", "Kind": "Boolean"} } ] `, }, @@ -129,8 +129,8 @@ func TestSchemaMigrationQueryWithMigrationRegisteredAfterUpdate(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/migrations/simple_test.go b/tests/integration/schema/migrations/simple_test.go index b63be03b5a..c1982f5325 100644 --- a/tests/integration/schema/migrations/simple_test.go +++ b/tests/integration/schema/migrations/simple_test.go @@ -22,7 +22,7 @@ import ( // Migrations need to be able to be registered for unknown schema ids, so they // may migrate to/from them if received by the P2P system.
-func TestSchemaMigrationDoesNotErrorGivenUnknownSchemaIDs(t *testing.T) { +func TestSchemaMigrationDoesNotErrorGivenUnknownSchemaRoots(t *testing.T) { test := testUtils.TestCase{ Description: "Test schema migration, unknown schema ids", Actions: []any{ @@ -91,8 +91,8 @@ func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { }, testUtils.ConfigureMigration{ LensConfig: client.LensConfig{ - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { @@ -124,8 +124,8 @@ func TestSchemaMigrationGetMigrationsReturnsMultiple(t *testing.T) { }, }, { - SourceSchemaVersionID: "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq", - DestinationSchemaVersionID: "bafkreia56p6i6o3l4jijayiqd5eiijsypjjokbldaxnmqgeav6fe576hcy", + SourceSchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + DestinationSchemaVersionID: "bafkreiaa3njstjciqclhh4dzv2xaw32tfxxbrbembdvwqfmuuqai3ghu7a", Lens: model.Lens{ Lenses: []model.LensModule{ { diff --git a/tests/integration/schema/simple_test.go b/tests/integration/schema/simple_test.go index 47ef9810be..6bcb2a1dec 100644 --- a/tests/integration/schema/simple_test.go +++ b/tests/integration/schema/simple_test.go @@ -13,10 +13,15 @@ package schema import ( "testing" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) func TestSchemaSimpleCreatesSchemaGivenEmptyType(t *testing.T) { + schemaVersionID := "bafkreickgf3nbjaairxkkqawmrv7fafaafyccl4qygqeveagisdn42eohu" + test := testUtils.TestCase{ Actions: []any{ testUtils.SchemaUpdate{ @@ -38,6 +43,22 @@ func TestSchemaSimpleCreatesSchemaGivenEmptyType(t *testing.T) { }, }, }, + testUtils.GetSchema{ + VersionID: immutable.Some(schemaVersionID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + VersionID: schemaVersionID, + Root: schemaVersionID, + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + }, + }, + }, + }, + }, }, } diff --git a/tests/integration/schema/updates/add/field/crdt/composite_test.go b/tests/integration/schema/updates/add/field/crdt/composite_test.go index 1ad0dc06d2..e1891f95f7 100644 --- a/tests/integration/schema/updates/add/field/crdt/composite_test.go +++ b/tests/integration/schema/updates/add/field/crdt/composite_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTCompositeErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":3} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":3} } ] `, ExpectedError: "only default or LWW (last writer wins) CRDT types are supported. 
Name: foo, CRDTType: 3", diff --git a/tests/integration/schema/updates/add/field/crdt/invalid_test.go b/tests/integration/schema/updates/add/field/crdt/invalid_test.go index 0c899155fb..dee615dac2 100644 --- a/tests/integration/schema/updates/add/field/crdt/invalid_test.go +++ b/tests/integration/schema/updates/add/field/crdt/invalid_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTInvalidErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":99} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":99} } ] `, ExpectedError: "only default or LWW (last writer wins) CRDT types are supported. Name: foo, CRDTType: 99", diff --git a/tests/integration/schema/updates/add/field/crdt/lww_test.go b/tests/integration/schema/updates/add/field/crdt/lww_test.go index c8a4b93007..5d75d4db6e 100644 --- a/tests/integration/schema/updates/add/field/crdt/lww_test.go +++ b/tests/integration/schema/updates/add/field/crdt/lww_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTLWW(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":1} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/crdt/none_test.go b/tests/integration/schema/updates/add/field/crdt/none_test.go index 2ed83e3898..c49faa3904 100644 --- a/tests/integration/schema/updates/add/field/crdt/none_test.go +++ b/tests/integration/schema/updates/add/field/crdt/none_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTDefault(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldCRDTNone(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":0} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":0} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/crdt/object_bool_test.go b/tests/integration/schema/updates/add/field/crdt/object_bool_test.go index 5d87c8a57e..d36af59dc1 100644 --- a/tests/integration/schema/updates/add/field/crdt/object_bool_test.go +++ b/tests/integration/schema/updates/add/field/crdt/object_bool_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldCRDTObjectWithBoolFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2, "Typ":2} } ] `, ExpectedError: "only default or LWW (last writer wins) CRDT types are supported. 
Name: foo, CRDTType: 2", diff --git a/tests/integration/schema/updates/add/field/create_test.go b/tests/integration/schema/updates/add/field/create_test.go index a6a14f2142..d59df6c294 100644 --- a/tests/integration/schema/updates/add/field/create_test.go +++ b/tests/integration/schema/updates/add/field/create_test.go @@ -36,7 +36,7 @@ func TestSchemaUpdatesAddFieldWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -84,7 +84,7 @@ func TestSchemaUpdatesAddFieldWithCreateAfterSchemaUpdate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/create_update_test.go b/tests/integration/schema/updates/add/field/create_update_test.go index 1722531568..0228c205ed 100644 --- a/tests/integration/schema/updates/add/field/create_update_test.go +++ b/tests/integration/schema/updates/add/field/create_update_test.go @@ -17,8 +17,8 @@ import ( ) func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoin(t *testing.T) { - initialSchemaVersionId := "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq" - updatedSchemaVersionId := "bafkreidejaxpsevyijnr4nah4e2l263emwhdaj57fwwv34eu5rea4ff54e" + initialSchemaVersionId := "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq" + updatedSchemaVersionId := "bafkreid5bpw7sipm63l5gxxjrs34yrq2ur5xrzyseez5rnj3pvnvkaya6m" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, verison join", @@ -61,7 +61,7 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -105,8 +105,8 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndVersionJoi } func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuery(t *testing.T) { - initialSchemaVersionId := "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq" - updatedSchemaVersionId := "bafkreidejaxpsevyijnr4nah4e2l263emwhdaj57fwwv34eu5rea4ff54e" + initialSchemaVersionId := "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq" + updatedSchemaVersionId := "bafkreid5bpw7sipm63l5gxxjrs34yrq2ur5xrzyseez5rnj3pvnvkaya6m" test := testUtils.TestCase{ Description: "Test schema update, add field with update after schema update, commits query", @@ -127,7 +127,7 @@ func TestSchemaUpdatesAddFieldWithCreateWithUpdateAfterSchemaUpdateAndCommitQuer testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/bool_array_test.go b/tests/integration/schema/updates/add/field/kind/bool_array_test.go index ee8d53644e..64df51a18c 100644 --- a/tests/integration/schema/updates/add/field/kind/bool_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/bool_array_test.go @@ -30,7 +30,7 @@ func 
TestSchemaUpdatesAddFieldKindBoolArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 3} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 3} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindBoolArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 3} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 3} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindBoolArraySubstitutionWithCreate(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Boolean!]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Boolean!]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go index e0c664127b..899d0cba36 100644 --- a/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/bool_nil_array_test.go @@ -32,7 +32,7 @@ func TestSchemaUpdatesAddFieldKindNillableBoolArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 18} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 18} } ] `, }, @@ -64,7 +64,7 @@ func TestSchemaUpdatesAddFieldKindNillableBoolArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 18} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 18} } ] `, }, @@ -108,7 +108,7 @@ func TestSchemaUpdatesAddFieldKindNillableBoolArraySubstitutionWithCreate(t *tes testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Boolean]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Boolean]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/bool_test.go b/tests/integration/schema/updates/add/field/kind/bool_test.go index 7be3801bc3..c77d187dbb 100644 --- a/tests/integration/schema/updates/add/field/kind/bool_test.go +++ b/tests/integration/schema/updates/add/field/kind/bool_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindBool(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindBoolWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 2} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindBoolSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "Boolean"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "Boolean"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/datetime_test.go 
b/tests/integration/schema/updates/add/field/kind/datetime_test.go index 5363864c47..6ebcc3af6f 100644 --- a/tests/integration/schema/updates/add/field/kind/datetime_test.go +++ b/tests/integration/schema/updates/add/field/kind/datetime_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindDateTime(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 10} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 10} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindDateTimeWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 4} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 4} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindDateTimeSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "DateTime"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "DateTime"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/dockey_test.go b/tests/integration/schema/updates/add/field/kind/dockey_test.go index f4d5d9aabe..6d8aca4736 100644 --- a/tests/integration/schema/updates/add/field/kind/dockey_test.go +++ b/tests/integration/schema/updates/add/field/kind/dockey_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindDocKey(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 1} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindDocKeyWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 1} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindDocKeySubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "ID"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "ID"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/float_array_test.go b/tests/integration/schema/updates/add/field/kind/float_array_test.go index 86e8ddd882..dcf9fd3d42 100644 --- a/tests/integration/schema/updates/add/field/kind/float_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/float_array_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindFloatArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 7} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 7} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindFloatArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 7} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 7} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindFloatArraySubstitutionWithCreate(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Float!]"} } + { 
"op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Float!]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go index 4cb1bb8133..9dd4209a38 100644 --- a/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/float_nil_array_test.go @@ -32,7 +32,7 @@ func TestSchemaUpdatesAddFieldKindNillableFloatArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 20} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 20} } ] `, }, @@ -64,7 +64,7 @@ func TestSchemaUpdatesAddFieldKindNillableFloatArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 20} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 20} } ] `, }, @@ -112,7 +112,7 @@ func TestSchemaUpdatesAddFieldKindNillableFloatArraySubstitutionWithCreate(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Float]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Float]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/float_test.go b/tests/integration/schema/updates/add/field/kind/float_test.go index 9411a4e7d1..b145d4c148 100644 --- a/tests/integration/schema/updates/add/field/kind/float_test.go +++ b/tests/integration/schema/updates/add/field/kind/float_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindFloat(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 6} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 6} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindFloatWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 6} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 6} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindFloatSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "Float"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "Float"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go b/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go index af852c8dd6..a3dc12fb13 100644 --- a/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/foreign_object_array_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 17} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 17} } ] `, ExpectedError: "a `Schema` [name] must be provided when adding a new relation field. 
Field: foo, Kind: 17", @@ -55,10 +55,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_InvalidSchemaJson(t *testin testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": 123} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": 123} } ] `, - ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Schema.Fields.Schema of type string", + ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Fields.Schema of type string", }, }, } @@ -79,7 +79,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingRelationType(t *test testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": "Users"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 17, "Schema": "Users"} } ] `, ExpectedError: "invalid RelationType. Field: foo, Expected: 10, Actual: 0", @@ -103,7 +103,7 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingRelationName(t *test testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 17, "RelationType": 10, "Schema": "Users" }} ] @@ -129,10 +129,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingKind(t *testi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id"} } ] `, ExpectedError: "relational id field of invalid kind. Field: foo_id, Expected: ID, Actual: 0", @@ -156,10 +156,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldInvalidKind(t *testi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } ] `, ExpectedError: "relational id field of invalid kind. Field: foo_id, Expected: ID, Actual: Boolean", @@ -183,10 +183,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingRelationType( testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } ] `, ExpectedError: "invalid RelationType. 
Field: foo_id, Expected: 64, Actual: 0", @@ -210,10 +210,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldInvalidRelationType( testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } ] `, ExpectedError: "invalid RelationType. Field: foo_id, Expected: 64, Actual: 4", @@ -237,10 +237,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_IDFieldMissingRelationName( testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } ] `, ExpectedError: "missing relation name. Field: foo_id", @@ -264,10 +264,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_OnlyHalfRelationDefined(t * testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -293,13 +293,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_NoPrimaryDefined(t *testing testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -325,13 +325,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_PrimaryDefinedOnManySide(t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 138, "Schema": "Users", "RelationName": "foo" }} ] @@ -357,13 +357,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_RelatedKindMismatch(t *test testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { 
"op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -389,13 +389,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_RelatedKindAndRelationTypeM testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" }} ] @@ -421,13 +421,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_RelatedRelationTypeMismatch testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }} ] @@ -455,13 +455,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_Succeeds(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -551,13 +551,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_SinglePrimaryObjectKindSubs testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} 
] @@ -630,13 +630,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_SingleSecondaryObjectKindSu testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -709,13 +709,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_ObjectKindSubstitution(t *t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -788,13 +788,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_ObjectKindSubstitutionWithA testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "RelationName": "foo" }} ] @@ -872,13 +872,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_PrimaryObjectKindAndSchemaM testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Dog", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Users", "RelationName": "foo" }} ] @@ -911,13 +911,13 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_SecondaryObjectKindAndSchem testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" 
}}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "Schema": "Dog", "RelationName": "foo" }} ] @@ -945,10 +945,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingPrimaryIDField(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "RelationName": "foo" }} ] @@ -1022,10 +1022,10 @@ func TestSchemaUpdatesAddFieldKindForeignObjectArray_MissingPrimaryIDField_DoesN testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 137, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "[Users]", "RelationType": 10, "RelationName": "foo" }} ] diff --git a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go index e09aa4dfac..21afdec279 100644 --- a/tests/integration/schema/updates/add/field/kind/foreign_object_test.go +++ b/tests/integration/schema/updates/add/field/kind/foreign_object_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 16} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 16} } ] `, ExpectedError: "a `Schema` [name] must be provided when adding a new relation field. Field: foo, Kind: 16", @@ -55,10 +55,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_InvalidSchemaJson(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": 123} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": 123} } ] `, - ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Schema.Fields.Schema of type string", + ExpectedError: "json: cannot unmarshal number into Go struct field FieldDescription.Fields.Schema of type string", }, }, } @@ -79,7 +79,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingRelationType(t *testing.T testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": "Users"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 16, "Schema": "Users"} } ] `, ExpectedError: "invalid RelationType. 
Field: foo, Expected: 1 and 4 or 8, with optionally 128, Actual: 0", @@ -103,7 +103,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_UnknownSchema(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 5, "Schema": "Unknown" }} ] @@ -129,7 +129,7 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingRelationName(t *testing.T testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 5, "Schema": "Users" }} ] @@ -155,10 +155,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingKind(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id"} } ] `, ExpectedError: "relational id field of invalid kind. Field: foo_id, Expected: ID, Actual: 0", @@ -182,10 +182,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldInvalidKind(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 2} } ] `, ExpectedError: "relational id field of invalid kind. Field: foo_id, Expected: ID, Actual: Boolean", @@ -209,10 +209,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingRelationType(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1} } ] `, ExpectedError: "invalid RelationType. Field: foo_id, Expected: 64, Actual: 0", @@ -236,10 +236,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldInvalidRelationType(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 4} } ] `, ExpectedError: "invalid RelationType. 
Field: foo_id, Expected: 64, Actual: 4", @@ -263,10 +263,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_IDFieldMissingRelationName(t *te testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo_id", "Kind": 1, "RelationType": 64} } ] `, ExpectedError: "missing relation name. Field: foo_id", @@ -290,10 +290,10 @@ func TestSchemaUpdatesAddFieldKindForeignObject_OnlyHalfRelationDefined(t *testi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -319,13 +319,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_NoPrimaryDefined(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }} ] @@ -351,16 +351,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_BothSidesPrimary(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "Schema": "Users", "RelationName": "foo" }} ] @@ -386,13 +386,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_RelatedKindMismatch(t *testing.T testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": 
"/Users/Fields/-", "value": { "Name": "foobar", "Kind": 17, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }} ] @@ -418,13 +418,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_RelatedRelationTypeMismatch(t *t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 9, "Schema": "Users", "RelationName": "foo" }} ] @@ -452,16 +452,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_Succeeds(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -549,16 +549,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_SinglePrimaryObjectKindSubstitut testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": 16, "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -629,16 +629,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_SingleSecondaryObjectKindSubstit testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": 16, "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": 
"/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -709,16 +709,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_ObjectKindSubstitution(t *testin testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "Schema": "Users", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -789,16 +789,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_ObjectKindSubstitutionWithAutoSc testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -874,16 +874,16 @@ func TestSchemaUpdatesAddFieldKindForeignObject_ObjectKindAndSchemaMismatch(t *t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "Schema": "Dog", "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -911,13 +911,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingPrimaryIDField(t *testing testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" }}, - { "op": "add", "path": 
"/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }} ] @@ -989,13 +989,13 @@ func TestSchemaUpdatesAddFieldKindForeignObject_MissingSecondaryIDField(t *testi testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo", "Kind": "Users", "RelationType": 133, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foo_id", "Kind": 1, "RelationType": 64, "RelationName": "foo" }}, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": { + { "op": "add", "path": "/Users/Fields/-", "value": { "Name": "foobar", "Kind": "Users", "RelationType": 5, "RelationName": "foo" }} ] diff --git a/tests/integration/schema/updates/add/field/kind/int_array_test.go b/tests/integration/schema/updates/add/field/kind/int_array_test.go index 4e7c732ec1..9a6d9e69af 100644 --- a/tests/integration/schema/updates/add/field/kind/int_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/int_array_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindIntArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 5} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 5} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindIntArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 5} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 5} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindIntArraySubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Int!]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Int!]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go index 0642ffa894..0de26958bb 100644 --- a/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/int_nil_array_test.go @@ -32,7 +32,7 @@ func TestSchemaUpdatesAddFieldKindNillableIntArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 19} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 19} } ] `, }, @@ -64,7 +64,7 @@ func TestSchemaUpdatesAddFieldKindNillableIntArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 19} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 19} } ] `, }, @@ -112,7 +112,7 @@ func TestSchemaUpdatesAddFieldKindNillableIntArraySubstitutionWithCreate(t *test testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[Int]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[Int]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/int_test.go b/tests/integration/schema/updates/add/field/kind/int_test.go 
index 3e12ed9106..70b3294a84 100644 --- a/tests/integration/schema/updates/add/field/kind/int_test.go +++ b/tests/integration/schema/updates/add/field/kind/int_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindInt(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 4} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 4} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindIntWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 4} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 4} } ] `, }, @@ -83,7 +83,7 @@ func TestSchemaUpdatesAddFieldKindIntWithCreate(t *testing.T) { Results: []map[string]any{ { "name": "John", - "foo": uint64(3), + "foo": int64(3), }, }, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindIntSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "Int"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "Int"} } ] `, }, @@ -127,7 +127,7 @@ func TestSchemaUpdatesAddFieldKindIntSubstitutionWithCreate(t *testing.T) { Results: []map[string]any{ { "name": "John", - "foo": uint64(3), + "foo": int64(3), }, }, }, diff --git a/tests/integration/schema/updates/add/field/kind/invalid_test.go b/tests/integration/schema/updates/add/field/kind/invalid_test.go index fa7556a86b..5e578e5307 100644 --- a/tests/integration/schema/updates/add/field/kind/invalid_test.go +++ b/tests/integration/schema/updates/add/field/kind/invalid_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKind8(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 8} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 8} } ] `, ExpectedError: "no type found for given name. Type: 8", @@ -54,7 +54,7 @@ func TestSchemaUpdatesAddFieldKind9(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 9} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 9} } ] `, ExpectedError: "no type found for given name. Type: 9", @@ -78,7 +78,7 @@ func TestSchemaUpdatesAddFieldKind13(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 13} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 13} } ] `, ExpectedError: "no type found for given name. Type: 13", @@ -102,7 +102,7 @@ func TestSchemaUpdatesAddFieldKind14(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 14} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 14} } ] `, ExpectedError: "no type found for given name. Type: 14", @@ -126,7 +126,7 @@ func TestSchemaUpdatesAddFieldKind15(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 15} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 15} } ] `, ExpectedError: "no type found for given name. 
Type: 15", @@ -152,7 +152,7 @@ func TestSchemaUpdatesAddFieldKind22(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 22} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 22} } ] `, ExpectedError: "no type found for given name. Type: 22", @@ -178,7 +178,7 @@ func TestSchemaUpdatesAddFieldKind198(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 198} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 198} } ] `, ExpectedError: "no type found for given name. Type: 198", @@ -202,7 +202,7 @@ func TestSchemaUpdatesAddFieldKindInvalidSubstitution(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "InvalidKind"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "InvalidKind"} } ] `, ExpectedError: "no type found for given name. Kind: InvalidKind", diff --git a/tests/integration/schema/updates/add/field/kind/none_test.go b/tests/integration/schema/updates/add/field/kind/none_test.go index 7e8c44dc73..ab6111dadb 100644 --- a/tests/integration/schema/updates/add/field/kind/none_test.go +++ b/tests/integration/schema/updates/add/field/kind/none_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindNone(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 0} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 0} } ] `, ExpectedError: "no type found for given name. Type: 0", diff --git a/tests/integration/schema/updates/add/field/kind/string_array_test.go b/tests/integration/schema/updates/add/field/kind/string_array_test.go index d3e03c8b35..b035162aed 100644 --- a/tests/integration/schema/updates/add/field/kind/string_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/string_array_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindStringArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 12} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 12} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindStringArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 12} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 12} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindStringArraySubstitutionWithCreate(t *testing.T testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[String!]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[String!]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go b/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go index c34fe22aba..9fc750cc80 100644 --- a/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go +++ b/tests/integration/schema/updates/add/field/kind/string_nil_array_test.go @@ -32,7 +32,7 @@ func TestSchemaUpdatesAddFieldKindNillableStringArray(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": 
"foo", "Kind": 21} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 21} } ] `, }, @@ -64,7 +64,7 @@ func TestSchemaUpdatesAddFieldKindNillableStringArrayWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 21} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 21} } ] `, }, @@ -112,7 +112,7 @@ func TestSchemaUpdatesAddFieldKindNillableStringArraySubstitutionWithCreate(t *t testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "[String]"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "[String]"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/kind/string_test.go b/tests/integration/schema/updates/add/field/kind/string_test.go index f32f9409c4..336c2fe6de 100644 --- a/tests/integration/schema/updates/add/field/kind/string_test.go +++ b/tests/integration/schema/updates/add/field/kind/string_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldKindString(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 11} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldKindStringWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": 11} } ] `, }, @@ -106,7 +106,7 @@ func TestSchemaUpdatesAddFieldKindStringSubstitutionWithCreate(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "foo", "Kind": "String"} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "foo", "Kind": "String"} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/simple_test.go b/tests/integration/schema/updates/add/field/simple_test.go index d64f9e3bbe..69ddfd1734 100644 --- a/tests/integration/schema/updates/add/field/simple_test.go +++ b/tests/integration/schema/updates/add/field/simple_test.go @@ -13,10 +13,16 @@ package field import ( "testing" + "github.com/sourcenetwork/immutable" + + "github.com/sourcenetwork/defradb/client" testUtils "github.com/sourcenetwork/defradb/tests/integration" ) func TestSchemaUpdatesAddFieldSimple(t *testing.T) { + schemaVersion1ID := "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq" + schemaVersion2ID := "bafkreid5bpw7sipm63l5gxxjrs34yrq2ur5xrzyseez5rnj3pvnvkaya6m" + test := testUtils.TestCase{ Description: "Test schema update, add field", Actions: []any{ @@ -30,7 +36,7 @@ func TestSchemaUpdatesAddFieldSimple(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -43,6 +49,126 @@ func TestSchemaUpdatesAddFieldSimple(t *testing.T) { }`, Results: []map[string]any{}, }, + testUtils.GetSchema{ + VersionID: immutable.Some(schemaVersion2ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + VersionID: schemaVersion2ID, + Root: schemaVersion1ID, + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + ID: 1, + Kind: 
client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "email", + ID: 2, + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_Errors(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, add field, do not set default version", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.Request{ + Request: `query { + Users { + name + email + } + }`, + ExpectedError: `Cannot query field "email" on type "Users".`, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchemaUpdates_AddFieldSimpleDoNotSetDefault_VersionIsQueryable(t *testing.T) { + schemaVersion1ID := "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq" + schemaVersion2ID := "bafkreid5bpw7sipm63l5gxxjrs34yrq2ur5xrzyseez5rnj3pvnvkaya6m" + + test := testUtils.TestCase{ + Description: "Test schema update, add field, do not set default version, version is queryable", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.GetSchema{ + VersionID: immutable.Some(schemaVersion2ID), + ExpectedResults: []client.SchemaDescription{ + { + Name: "Users", + // Even though schema version 2 is not active, it should still be possible to + // fetch it. + VersionID: schemaVersion2ID, + Root: schemaVersion1ID, + Fields: []client.FieldDescription{ + { + Name: "_key", + Kind: client.FieldKind_DocKey, + Typ: client.LWW_REGISTER, + }, + { + Name: "name", + ID: 1, + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + { + Name: "email", + ID: 2, + Kind: client.FieldKind_STRING, + Typ: client.LWW_REGISTER, + }, + }, + }, + }, + }, }, } testUtils.ExecuteTestCase(t, test) @@ -94,8 +220,8 @@ func TestSchemaUpdatesAddFieldMultipleInPatch(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "city", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "city", "Kind": 11} } ] `, }, @@ -128,14 +254,14 @@ func TestSchemaUpdatesAddFieldMultiplePatches(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "city", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "city", "Kind": 11} } ] `, }, @@ -168,7 +294,7 @@ func TestSchemaUpdatesAddFieldSimpleWithoutName(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Kind": 11} } ] `, ExpectedError: "Names must match /^[_a-zA-Z][_a-zA-Z0-9]*$/ but \"\" does not.", @@ -193,8 +319,8 @@ func
TestSchemaUpdatesAddFieldMultipleInPatchPartialSuccess(t *testing.T) { // Email field is valid, City field has invalid kind Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "city", "Kind": 111} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "city", "Kind": 111} } ] `, ExpectedError: "no type found for given name. Type: 111", @@ -237,7 +363,7 @@ func TestSchemaUpdatesAddFieldSimpleDuplicateOfExistingField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "name", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": 11} } ] `, ExpectedError: "duplicate field. Name: name", @@ -261,8 +387,8 @@ func TestSchemaUpdatesAddFieldSimpleDuplicateField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, ExpectedError: "duplicate field. Name: email", @@ -286,7 +412,7 @@ func TestSchemaUpdatesAddFieldWithExplicitIDErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"ID": 2, "Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"ID": 2, "Name": "email", "Kind": 11} } ] `, ExpectedError: "explicitly setting a field ID value is not supported. 
Field: email, ID: 2", diff --git a/tests/integration/schema/updates/add/field/with_filter_test.go b/tests/integration/schema/updates/add/field/with_filter_test.go index decdb7b997..6161d9a237 100644 --- a/tests/integration/schema/updates/add/field/with_filter_test.go +++ b/tests/integration/schema/updates/add/field/with_filter_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldSimpleWithFilter(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -70,7 +70,7 @@ func TestSchemaUpdatesAddFieldSimpleWithFilterOnPopulatedDatabase(t *testing.T) testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, diff --git a/tests/integration/schema/updates/add/field/with_index_sub_test.go b/tests/integration/schema/updates/add/field/with_index_sub_test.go index eb4dc3d9c0..274e3aac2b 100644 --- a/tests/integration/schema/updates/add/field/with_index_sub_test.go +++ b/tests/integration/schema/updates/add/field/with_index_sub_test.go @@ -30,7 +30,7 @@ func TestSchemaUpdatesAddFieldSimple_FieldIndexedByName(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/email", "value": {"Kind": 11} } + { "op": "add", "path": "/Users/Fields/email", "value": {"Kind": 11} } ] `, }, @@ -62,7 +62,7 @@ func TestSchemaUpdatesAddFieldSimple_FieldIndexedByNameWithSameNameDefinedInValu testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/email", "value": {"Name": "email","Kind": 11} } + { "op": "add", "path": "/Users/Fields/email", "value": {"Name": "email","Kind": 11} } ] `, }, @@ -94,7 +94,7 @@ func TestSchemaUpdatesAddFieldSimple_FieldIndexedByNameWithDifferentNameDefinedI testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/email", "value": {"Name": "different field name","Kind": 11} } + { "op": "add", "path": "/Users/Fields/email", "value": {"Name": "different field name","Kind": 11} } ] `, ExpectedError: "the index used does not match the given name", @@ -118,8 +118,8 @@ func TestSchemaUpdatesAddFieldSimple_FieldIndexedByNameMultipleTimes(t *testing. testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/email", "value": {"Kind": 11} }, - { "op": "test", "path": "/Users/Schema/Fields/email/Kind", "value": 11 } + { "op": "add", "path": "/Users/Fields/email", "value": {"Kind": 11} }, + { "op": "test", "path": "/Users/Fields/email/Kind", "value": 11 } ] `, }, diff --git a/tests/integration/schema/updates/add/field/with_introspection_test.go b/tests/integration/schema/updates/add/field/with_introspection_test.go index df75ac43c3..ea9885674c 100644 --- a/tests/integration/schema/updates/add/field/with_introspection_test.go +++ b/tests/integration/schema/updates/add/field/with_introspection_test.go @@ -29,7 +29,7 @@ func TestSchemaUpdatesAddFieldIntrospection(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "name", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": 11} } ] `, }, @@ -82,8 +82,8 @@ func TestSchemaUpdatesAddFieldIntrospectionDoesNotAmendGQLTypesGivenBadPatch(t * // [Name] should not be added to the GQL types. 
Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "name", "Kind": 11} }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 111} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "name", "Kind": 11} }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 111} } ] `, ExpectedError: "no type found for given name. Type: 111", diff --git a/tests/integration/schema/updates/add/simple_test.go b/tests/integration/schema/updates/add/simple_test.go index b8e4ce3a5f..0eac29b49a 100644 --- a/tests/integration/schema/updates/add/simple_test.go +++ b/tests/integration/schema/updates/add/simple_test.go @@ -86,7 +86,7 @@ func TestSchemaUpdatesAddSimpleErrorsAddingSchemaProp(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/-", "value": {"Foo": "Bar"} } + { "op": "add", "path": "/Users/-", "value": {"Foo": "Bar"} } ] `, ExpectedError: `json: unknown field "-"`, @@ -142,7 +142,7 @@ func TestSchemaUpdatesAddSimpleErrorsAddingUnsupportedSchemaProp(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Foo/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Foo/-", "value": {"Name": "email", "Kind": 11} } ] `, ExpectedError: "add operation does not apply: doc is missing path", diff --git a/tests/integration/schema/updates/copy/field/simple_test.go b/tests/integration/schema/updates/copy/field/simple_test.go index ff0680f55d..f10569dabd 100644 --- a/tests/integration/schema/updates/copy/field/simple_test.go +++ b/tests/integration/schema/updates/copy/field/simple_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesCopyFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/2" } + { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" } ] `, ExpectedError: "duplicate field. Name: email", @@ -67,9 +67,9 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceName(t *testing.T) { // clone. Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/3" }, - { "op": "remove", "path": "/Users/Schema/Fields/3/ID" }, - { "op": "replace", "path": "/Users/Schema/Fields/3/Name", "value": "fax" } + { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/3" }, + { "op": "remove", "path": "/Users/Fields/3/ID" }, + { "op": "replace", "path": "/Users/Fields/3/Name", "value": "fax" } ] `, }, @@ -105,10 +105,10 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndKindSubstitution(t * // re-typing the clone. 
Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/2" }, - { "op": "remove", "path": "/Users/Schema/Fields/2/ID" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Name", "value": "age" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Kind", "value": "Int" } + { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" }, + { "op": "remove", "path": "/Users/Fields/2/ID" }, + { "op": "replace", "path": "/Users/Fields/2/Name", "value": "age" }, + { "op": "replace", "path": "/Users/Fields/2/Kind", "value": "Int" } ] `, }, @@ -130,7 +130,7 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndKindSubstitution(t * { "name": "John", // It is important to test this with data, to ensure the type has been substituted correctly - "age": uint64(3), + "age": int64(3), }, }, }, @@ -156,10 +156,10 @@ func TestSchemaUpdatesCopyFieldWithRemoveIDAndReplaceNameAndInvalidKindSubstitut // re-typing the clone. Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/2" }, - { "op": "remove", "path": "/Users/Schema/Fields/2/ID" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Name", "value": "Age" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Kind", "value": "NotAValidKind" } + { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Fields/2" }, + { "op": "remove", "path": "/Users/Fields/2/ID" }, + { "op": "replace", "path": "/Users/Fields/2/Name", "value": "Age" }, + { "op": "replace", "path": "/Users/Fields/2/Kind", "value": "NotAValidKind" } ] `, ExpectedError: "no type found for given name. Kind: NotAValidKind", diff --git a/tests/integration/schema/updates/copy/field/with_introspection_test.go b/tests/integration/schema/updates/copy/field/with_introspection_test.go index 566b18db7c..2106d22b1b 100644 --- a/tests/integration/schema/updates/copy/field/with_introspection_test.go +++ b/tests/integration/schema/updates/copy/field/with_introspection_test.go @@ -31,9 +31,9 @@ func TestSchemaUpdatesCopyFieldIntrospectionWithRemoveIDAndReplaceName(t *testin testUtils.SchemaPatch{ Patch: ` [ - { "op": "copy", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/2" }, - { "op": "remove", "path": "/Users/Schema/Fields/2/ID" }, - { "op": "replace", "path": "/Users/Schema/Fields/2/Name", "value": "fax" } + { "op": "copy", "from": "/Users/Fields/1", "path": "/Users/Fields/2" }, + { "op": "remove", "path": "/Users/Fields/2/ID" }, + { "op": "replace", "path": "/Users/Fields/2/Name", "value": "fax" } ] `, }, diff --git a/tests/integration/schema/updates/copy/simple_test.go b/tests/integration/schema/updates/copy/simple_test.go index 5b4c19ed22..96fc3a0025 100644 --- a/tests/integration/schema/updates/copy/simple_test.go +++ b/tests/integration/schema/updates/copy/simple_test.go @@ -33,12 +33,10 @@ func TestSchemaUpdatesCopyCollectionWithRemoveIDAndReplaceName(t *testing.T) { Patch: ` [ { "op": "copy", "from": "/Users", "path": "/Book" }, - { "op": "remove", "path": "/Book/ID" }, - { "op": "remove", "path": "/Book/Schema/SchemaID" }, - { "op": "remove", "path": "/Book/Schema/VersionID" }, - { "op": "remove", "path": "/Book/Schema/Fields/1/ID" }, - { "op": "replace", "path": "/Book/Name", "value": "Book" }, - { "op": "replace", "path": "/Book/Schema/Name", "value": "Book" } + { "op": "remove", "path": "/Book/Root" }, + { "op": "remove", "path": "/Book/VersionID" }, + { "op": "remove", "path": "/Book/Fields/1/ID" }, + { "op": "replace", "path": "/Book/Name", "value": "Book" } ] `, 
ExpectedError: "unknown collection, adding collections via patch is not supported. Name: Book", diff --git a/tests/integration/schema/updates/index/simple_test.go b/tests/integration/schema/updates/index/simple_test.go index 970ef2bb86..fb506ec623 100644 --- a/tests/integration/schema/updates/index/simple_test.go +++ b/tests/integration/schema/updates/index/simple_test.go @@ -31,7 +31,7 @@ func TestPatching_ForCollectionWithIndex_StillWorks(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -49,185 +49,3 @@ func TestPatching_ForCollectionWithIndex_StillWorks(t *testing.T) { } testUtils.ExecuteTestCase(t, test) } - -func TestPatching_IfAttemptToAddIndex_ReturnError(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test adding index to collection via patch fails", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String @index - age: Int - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "add", "path": "/Users/Indexes/-", "value": { - "Name": "some_index", - "ID": 0, - "Fields": [ - { - "Name": "age", - "Direction": "ASC" - } - ] - } - } - ] - `, - ExpectedError: "adding indexes via patch is not supported. ProposedName: some_index", - }, - testUtils.Request{ - Request: `query { - Users { - name - } - }`, - Results: []map[string]any{}, - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestPatching_IfAttemptToDropIndex_ReturnError(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test dropping index from collection via patch fails", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String @index - age: Int @index(name: "users_age_index") - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "remove", "path": "/Users/Indexes/1" } - ] - `, - ExpectedError: "dropping indexes via patch is not supported. Name: users_age_index", - }, - testUtils.Request{ - Request: `query { - Users { - name - } - }`, - Results: []map[string]any{}, - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestPatching_IfAttemptToChangeIndexName_ReturnError(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test changing index's name via patch fails", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String @index - age: Int - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "replace", "path": "/Users/Indexes/0/Name", "value": "new_index_name" } - ] - `, - ExpectedError: "adding indexes via patch is not supported. 
ProposedName: new_index_name", - }, - testUtils.Request{ - Request: `query { - Users { - name - } - }`, - Results: []map[string]any{}, - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestPatching_IfAttemptToChangeIndexField_ReturnError(t *testing.T) { - testCases := []struct { - description string - patch string - }{ - { - description: "Test adding a field to an index via patch fails", - patch: ` - [ - { "op": "add", "path": "/Users/Indexes/0/Fields/-", "value": { - "Name": "age", - "Direction": "ASC" - } - } - ] - `, - }, - { - description: "Test removing a field from an index via patch fails", - patch: ` - [ - { "op": "remove", "path": "/Users/Indexes/0/Fields/0" } - ] - `, - }, - { - description: "Test changing index's field name via patch fails", - patch: ` - [ - { "op": "replace", "path": "/Users/Indexes/0/Fields/0/Name", "value": "new_field_name" } - ] - `, - }, - { - description: "Test changing index's field direction via patch fails", - patch: ` - [ - { "op": "replace", "path": "/Users/Indexes/0/Fields/0/Direction", "value": "DESC" } - ] - `, - }, - } - - for _, testCase := range testCases { - test := testUtils.TestCase{ - Description: testCase.description, - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String @index - age: Int - } - `, - }, - testUtils.SchemaPatch{ - Patch: testCase.patch, - ExpectedError: "changing indexes via patch is not supported", - }, - testUtils.Request{ - Request: `query { - Users { - name - } - }`, - Results: []map[string]any{}, - }, - }, - } - testUtils.ExecuteTestCase(t, test) - } -} diff --git a/tests/integration/schema/updates/move/field/simple_test.go b/tests/integration/schema/updates/move/field/simple_test.go index 197b9410b7..9a678e8ab5 100644 --- a/tests/integration/schema/updates/move/field/simple_test.go +++ b/tests/integration/schema/updates/move/field/simple_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesMoveFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "move", "from": "/Users/Schema/Fields/1", "path": "/Users/Schema/Fields/-" } + { "op": "move", "from": "/Users/Fields/1", "path": "/Users/Fields/-" } ] `, ExpectedError: "moving fields is not currently supported. Name: name, ProposedIndex: 1, ExistingIndex: 2", diff --git a/tests/integration/schema/updates/move/simple_test.go b/tests/integration/schema/updates/move/simple_test.go index 60e0611746..e6d8bb1556 100644 --- a/tests/integration/schema/updates/move/simple_test.go +++ b/tests/integration/schema/updates/move/simple_test.go @@ -17,7 +17,7 @@ import ( ) func TestSchemaUpdatesMoveCollectionDoesNothing(t *testing.T) { - schemaVersionID := "bafkreihn4qameldz3j7rfundmd4ldhxnaircuulk6h2vcwnpcgxl4oqffq" + schemaVersionID := "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq" test := testUtils.TestCase{ Description: "Test schema update, move collection", diff --git a/tests/integration/schema/updates/remove/fields/simple_test.go b/tests/integration/schema/updates/remove/fields/simple_test.go index f4fa6c2482..515a8736e5 100644 --- a/tests/integration/schema/updates/remove/fields/simple_test.go +++ b/tests/integration/schema/updates/remove/fields/simple_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesRemoveFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2" } + { "op": "remove", "path": "/Users/Fields/2" } ] `, ExpectedError: "deleting an existing field is not supported. 
Name: name", @@ -56,7 +56,7 @@ func TestSchemaUpdatesRemoveAllFieldsErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields" } + { "op": "remove", "path": "/Users/Fields" } ] `, ExpectedError: "deleting an existing field is not supported", @@ -81,7 +81,7 @@ func TestSchemaUpdatesRemoveFieldNameErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2/Name" } + { "op": "remove", "path": "/Users/Fields/2/Name" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 2, ProposedName: ", @@ -106,7 +106,7 @@ func TestSchemaUpdatesRemoveFieldIDErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2/ID" } + { "op": "remove", "path": "/Users/Fields/2/ID" } ] `, ExpectedError: "deleting an existing field is not supported. Name: name, ID: 2", @@ -131,7 +131,7 @@ func TestSchemaUpdatesRemoveFieldKindErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2/Kind" } + { "op": "remove", "path": "/Users/Fields/2/Kind" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 2, ProposedName: ", @@ -156,7 +156,7 @@ func TestSchemaUpdatesRemoveFieldTypErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Fields/2/Typ" } + { "op": "remove", "path": "/Users/Fields/2/Typ" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 2, ProposedName: ", @@ -185,7 +185,7 @@ func TestSchemaUpdatesRemoveFieldSchemaErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Author/Schema/Fields/1/Schema" } + { "op": "remove", "path": "/Author/Fields/1/Schema" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 1, ProposedName: book", @@ -214,7 +214,7 @@ func TestSchemaUpdatesRemoveFieldRelationNameErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Author/Schema/Fields/1/RelationName" } + { "op": "remove", "path": "/Author/Fields/1/RelationName" } ] `, ExpectedError: "mutating an existing field is not supported. ID: 1, ProposedName: book", @@ -243,7 +243,7 @@ func TestSchemaUpdatesRemoveFieldRelationTypeErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Author/Schema/Fields/1/RelationType" } + { "op": "remove", "path": "/Author/Fields/1/RelationType" } ] `, ExpectedError: "mutating an existing field is not supported. 
ID: 1, ProposedName: book", diff --git a/tests/integration/schema/updates/remove/simple_test.go b/tests/integration/schema/updates/remove/simple_test.go index 1bbb956f4b..e9e4f139ae 100644 --- a/tests/integration/schema/updates/remove/simple_test.go +++ b/tests/integration/schema/updates/remove/simple_test.go @@ -34,16 +34,16 @@ func TestSchemaUpdatesRemoveCollectionNameErrors(t *testing.T) { { "op": "remove", "path": "/Users/Name" } ] `, - ExpectedError: "collection name can't be empty", + ExpectedError: "schema name can't be empty", }, }, } testUtils.ExecuteTestCase(t, test) } -func TestSchemaUpdatesRemoveCollectionIDErrors(t *testing.T) { +func TestSchemaUpdatesRemoveSchemaRootErrors(t *testing.T) { test := testUtils.TestCase{ - Description: "Test schema update, remove collection id", + Description: "Test schema update, remove schema root", Actions: []any{ testUtils.SchemaUpdate{ Schema: ` @@ -56,35 +56,10 @@ func TestSchemaUpdatesRemoveCollectionIDErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/ID" } + { "op": "remove", "path": "/Users/Root" } ] `, - ExpectedError: "CollectionID does not match existing. Name: Users, ExistingID: 1, ProposedID: 0", - }, - }, - } - testUtils.ExecuteTestCase(t, test) -} - -func TestSchemaUpdatesRemoveSchemaIDErrors(t *testing.T) { - test := testUtils.TestCase{ - Description: "Test schema update, remove schema ID", - Actions: []any{ - testUtils.SchemaUpdate{ - Schema: ` - type Users { - name: String - email: String - } - `, - }, - testUtils.SchemaPatch{ - Patch: ` - [ - { "op": "remove", "path": "/Users/Schema/SchemaID" } - ] - `, - ExpectedError: "SchemaID does not match existing", + ExpectedError: "SchemaRoot does not match existing", }, }, } @@ -107,7 +82,7 @@ func TestSchemaUpdatesRemoveSchemaVersionIDErrors(t *testing.T) { // This should do nothing Patch: ` [ - { "op": "remove", "path": "/Users/Schema/VersionID" } + { "op": "remove", "path": "/Users/VersionID" } ] `, }, @@ -140,10 +115,10 @@ func TestSchemaUpdatesRemoveSchemaNameErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "remove", "path": "/Users/Schema/Name" } + { "op": "remove", "path": "/Users/Name" } ] `, - ExpectedError: "modifying the schema name is not supported. ExistingName: Users, ProposedName: ", + ExpectedError: "schema name can't be empty", }, }, } diff --git a/tests/integration/schema/updates/replace/field/simple_test.go b/tests/integration/schema/updates/replace/field/simple_test.go index e56f708f99..057b8fe9b7 100644 --- a/tests/integration/schema/updates/replace/field/simple_test.go +++ b/tests/integration/schema/updates/replace/field/simple_test.go @@ -31,7 +31,7 @@ func TestSchemaUpdatesReplaceFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "replace", "path": "/Users/Schema/Fields/2", "value": {"Name": "Fax", "Kind": 11} } + { "op": "replace", "path": "/Users/Fields/2", "value": {"Name": "Fax", "Kind": 11} } ] `, ExpectedError: "deleting an existing field is not supported. Name: name, ID: 2", @@ -56,7 +56,7 @@ func TestSchemaUpdatesReplaceFieldWithIDErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "replace", "path": "/Users/Schema/Fields/2", "value": {"ID":2, "Name": "fax", "Kind": 11} } + { "op": "replace", "path": "/Users/Fields/2", "value": {"ID":2, "Name": "fax", "Kind": 11} } ] `, ExpectedError: "mutating an existing field is not supported. 
ID: 2, ProposedName: fax", diff --git a/tests/integration/schema/updates/replace/simple_test.go b/tests/integration/schema/updates/replace/simple_test.go index 600a12b69c..7729a274c9 100644 --- a/tests/integration/schema/updates/replace/simple_test.go +++ b/tests/integration/schema/updates/replace/simple_test.go @@ -34,12 +34,9 @@ func TestSchemaUpdatesReplaceCollectionErrors(t *testing.T) { { "op": "replace", "path": "/Users", "value": { "Name": "Book", - "Schema": { - "Name": "Book", - "Fields": [ - {"Name": "name", "Kind": 11} - ] - } + "Fields": [ + {"Name": "name", "Kind": 11} + ] } } ] diff --git a/tests/integration/schema/updates/test/add_field_test.go b/tests/integration/schema/updates/test/add_field_test.go index 179dddbc43..31eec344f2 100644 --- a/tests/integration/schema/updates/test/add_field_test.go +++ b/tests/integration/schema/updates/test/add_field_test.go @@ -30,8 +30,8 @@ func TestSchemaUpdatesTestAddField(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Name", "value": "Users" }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"Name": "email", "Kind": 11} } + { "op": "test", "path": "/Users/Name", "value": "Users" }, + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, }, @@ -63,8 +63,8 @@ func TestSchemaUpdatesTestAddFieldBlockedByTest(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Name", "value": "Author" }, - { "op": "add", "path": "/Users/Schema/Fields/-", "value": {"name": "Email", "Kind": 11} } + { "op": "test", "path": "/Users/Name", "value": "Author" }, + { "op": "add", "path": "/Users/Fields/-", "value": {"name": "Email", "Kind": 11} } ] `, ExpectedError: "test failed", diff --git a/tests/integration/schema/updates/test/field/simple_test.go b/tests/integration/schema/updates/test/field/simple_test.go index 24532a8718..414a472149 100644 --- a/tests/integration/schema/updates/test/field/simple_test.go +++ b/tests/integration/schema/updates/test/field/simple_test.go @@ -30,10 +30,10 @@ func TestSchemaUpdatesTestFieldNameErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/1/name", "value": "Email" } + { "op": "test", "path": "/Users/Fields/1/name", "value": "Email" } ] `, - ExpectedError: "testing value /Users/Schema/Fields/1/name failed: test failed", + ExpectedError: "testing value /Users/Fields/1/name failed: test failed", }, }, } @@ -54,7 +54,7 @@ func TestSchemaUpdatesTestFieldNamePasses(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/1/Name", "value": "name" } + { "op": "test", "path": "/Users/Fields/1/Name", "value": "name" } ] `, }, @@ -77,10 +77,10 @@ func TestSchemaUpdatesTestFieldErrors(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/1", "value": {"Name": "name", "Kind": 11} } + { "op": "test", "path": "/Users/Fields/1", "value": {"Name": "name", "Kind": 11} } ] `, - ExpectedError: "testing value /Users/Schema/Fields/1 failed: test failed", + ExpectedError: "testing value /Users/Fields/1 failed: test failed", }, }, } @@ -101,7 +101,7 @@ func TestSchemaUpdatesTestFieldPasses(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/1", "value": { + { "op": "test", "path": "/Users/Fields/1", "value": { "ID":1, "Name": "name", "Kind": 11, "Schema":"","RelationName":"","Typ":1,"RelationType":0 } } ] @@ -126,7 +126,7 @@ func 
TestSchemaUpdatesTestFieldPasses_UsingFieldNameAsIndex(t *testing.T) { testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/name", "value": { + { "op": "test", "path": "/Users/Fields/name", "value": { "ID":1, "Kind": 11, "Schema":"","RelationName":"","Typ":1,"RelationType":0 } } ] @@ -151,7 +151,7 @@ func TestSchemaUpdatesTestFieldPasses_TargettingKindUsingFieldNameAsIndex(t *tes testUtils.SchemaPatch{ Patch: ` [ - { "op": "test", "path": "/Users/Schema/Fields/name/Kind", "value": 11 } + { "op": "test", "path": "/Users/Fields/name/Kind", "value": 11 } ] `, }, diff --git a/tests/integration/schema/with_update_set_default_test.go b/tests/integration/schema/with_update_set_default_test.go new file mode 100644 index 0000000000..55242d7a2a --- /dev/null +++ b/tests/integration/schema/with_update_set_default_test.go @@ -0,0 +1,146 @@ +// Copyright 2023 Democratized Data Foundation +// +// Use of this software is governed by the Business Source License +// included in the file licenses/BSL.txt. +// +// As of the Change Date specified in that file, in accordance with +// the Business Source License, use of this software will be governed +// by the Apache License, Version 2.0, included in the file +// licenses/APL.txt. + +package schema + +import ( + "testing" + + "github.com/sourcenetwork/immutable" + + testUtils "github.com/sourcenetwork/defradb/tests/integration" +) + +func TestSchema_WithUpdateAndSetDefaultVersionToEmptyString_Errors(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, set default version to empty string", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: "", + ExpectedError: "schema version ID can't be empty", + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchema_WithUpdateAndSetDefaultVersionToUnknownVersion_Errors(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, set default version to invalid string", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: "does not exist", + ExpectedError: "datastore: key not found", + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func TestSchema_WithUpdateAndSetDefaultVersionToOriginal_NewFieldIsNotQueriable(t *testing.T) { + test := testUtils.TestCase{ + Description: "Test schema update, set default version to original schema version", + Actions: []any{ + testUtils.SchemaUpdate{ + Schema: ` + type Users { + name: String + } + `, + }, + testUtils.SchemaPatch{ + Patch: ` + [ + { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } + ] + `, + SetAsDefaultVersion: immutable.Some(false), + }, + testUtils.SetDefaultSchemaVersion{ + SchemaVersionID: "bafkreih27vuxrj4j2tmxnibfm77wswa36xji74hwhq7deipj5rvh3qyabq", + }, + testUtils.Request{ + Request: `query { + Users { + name + email + } + }`, + // As the email field did not exist at this schema version, it will return a gql error + ExpectedError: `Cannot query field "email" on type "Users".`, + }, + }, + } + testUtils.ExecuteTestCase(t, test) +} + +func 
TestSchema_WithUpdateAndSetDefaultVersionToNew_AllowsQueryingOfNewField(t *testing.T) { test := testUtils.TestCase{ Description: "Test schema update, set default version to new schema version", Actions: []any{ testUtils.SchemaUpdate{ Schema: ` type Users { name: String } `, }, testUtils.SchemaPatch{ Patch: ` [ { "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} } ] `, SetAsDefaultVersion: immutable.Some(false), }, testUtils.SetDefaultSchemaVersion{ SchemaVersionID: "bafkreid5bpw7sipm63l5gxxjrs34yrq2ur5xrzyseez5rnj3pvnvkaya6m", }, testUtils.Request{ Request: `query { Users { name email } }`, Results: []map[string]any{}, }, }, } testUtils.ExecuteTestCase(t, test) } diff --git a/tests/integration/state.go b/tests/integration/state.go index 69bd65e2b5..ca795a2492 100644 --- a/tests/integration/state.go +++ b/tests/integration/state.go @@ -14,10 +14,13 @@ import ( "context" "testing" + "github.com/libp2p/go-libp2p/core/crypto" + "github.com/libp2p/go-libp2p/core/peer" + "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/config" "github.com/sourcenetwork/defradb/datastore" - "github.com/sourcenetwork/defradb/net" + "github.com/sourcenetwork/defradb/tests/clients" ) type state struct { @@ -50,14 +53,17 @@ type state struct { // These synchronisation channels allow async actions to track their completion. syncChans []chan struct{} + // The private keys for any nodes. + nodePrivateKeys []crypto.PrivKey + // The addresses of any nodes configured. - nodeAddresses []string + nodeAddresses []peer.AddrInfo // The configurations for any nodes nodeConfigs []config.Config // The nodes active in this test. - nodes []*net.Node + nodes []clients.Client // The paths to any file-based databases active in this test. dbPaths []string @@ -78,6 +84,9 @@ type state struct { // Indexes, by index, by collection index, by node index. indexes [][][]client.IndexDescription + + // isBench indicates whether the test is currently being benchmarked. + isBench bool } // newState returns a new fresh state for the given testCase.
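For context, a minimal sketch (not part of the diff) of how the schema-version actions exercised in the tests above are meant to compose: apply a patch without promoting it, then promote the resulting version explicitly. The version ID string is the one used in the tests above; the test name is hypothetical and imports are elided.

func TestSchemaPatch_PromoteVersionExplicitly(t *testing.T) {
	test := testUtils.TestCase{
		Actions: []any{
			testUtils.SchemaUpdate{
				Schema: `type Users { name: String }`,
			},
			// Apply the patch, but leave the original schema version as default.
			testUtils.SchemaPatch{
				Patch:               `[{ "op": "add", "path": "/Users/Fields/-", "value": {"Name": "email", "Kind": 11} }]`,
				SetAsDefaultVersion: immutable.Some(false),
			},
			// Promote the patched version explicitly.
			testUtils.SetDefaultSchemaVersion{
				SchemaVersionID: "bafkreid5bpw7sipm63l5gxxjrs34yrq2ur5xrzyseez5rnj3pvnvkaya6m",
			},
		},
	}
	testUtils.ExecuteTestCase(t, test)
}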
@@ -99,9 +108,10 @@ func newState( allActionsDone: make(chan struct{}), subscriptionResultsChans: []chan func(){}, syncChans: []chan struct{}{}, - nodeAddresses: []string{}, + nodePrivateKeys: []crypto.PrivKey{}, + nodeAddresses: []peer.AddrInfo{}, nodeConfigs: []config.Config{}, - nodes: []*net.Node{}, + nodes: []clients.Client{}, dbPaths: []string{}, collections: [][]client.Collection{}, collectionNames: collectionNames, diff --git a/tests/integration/subscription/subscription_test.go b/tests/integration/subscription/subscription_test.go index 578f558cb2..7d51a240ad 100644 --- a/tests/integration/subscription/subscription_test.go +++ b/tests/integration/subscription/subscription_test.go @@ -31,12 +31,12 @@ func TestSubscriptionWithCreateMutations(t *testing.T) { Results: []map[string]any{ { "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": uint64(27), + "age": int64(27), "name": "John", }, { "_key": "bae-18def051-7f0f-5dc9-8a69-2a5e423f6b55", - "age": uint64(31), + "age": int64(31), "name": "Addo", }, }, @@ -86,7 +86,7 @@ func TestSubscriptionWithFilterAndOneCreateMutation(t *testing.T) { Results: []map[string]any{ { "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": uint64(27), + "age": int64(27), "name": "John", }, }, @@ -156,7 +156,7 @@ func TestSubscriptionWithFilterAndCreateMutations(t *testing.T) { Results: []map[string]any{ { "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": uint64(27), + "age": int64(27), "name": "John", }, }, @@ -225,7 +225,7 @@ func TestSubscriptionWithUpdateMutations(t *testing.T) { Results: []map[string]any{ { "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": uint64(27), + "age": int64(27), "name": "John", "points": float64(45), }, @@ -283,13 +283,13 @@ func TestSubscriptionWithUpdateAllMutations(t *testing.T) { Results: []map[string]any{ { "_key": "bae-0a24cf29-b2c2-5861-9d00-abd6250c475d", - "age": uint64(27), + "age": int64(27), "name": "John", "points": float64(55), }, { "_key": "bae-cf723876-5c6a-5dcf-a877-ab288eb30d57", - "age": uint64(31), + "age": int64(31), "name": "Addo", "points": float64(55), }, diff --git a/tests/integration/test_case.go b/tests/integration/test_case.go index e17adfdeaa..112a497dc8 100644 --- a/tests/integration/test_case.go +++ b/tests/integration/test_case.go @@ -11,6 +11,8 @@ package tests import ( + "testing" + "github.com/sourcenetwork/immutable" "github.com/sourcenetwork/defradb/client" @@ -81,10 +83,51 @@ type SchemaPatch struct { // If a value is not provided the patch will be applied to all nodes. NodeID immutable.Option[int] - Patch string + Patch string + + // If SetAsDefaultVersion has a value, and that value is false, then the schema version + // resulting from this patch will not be made default. + SetAsDefaultVersion immutable.Option[bool] + ExpectedError string +} + +// GetSchema is an action that fetches schemas using the provided options. +type GetSchema struct { + // NodeID may hold the ID (index) of a node to fetch the schema from. + // + // If a value is not provided the schema will be fetched from all nodes. + NodeID immutable.Option[int] + + // The VersionID of the schema version to fetch. + // + // This option will be prioritized over all other options. + VersionID immutable.Option[string] + + // The Root of the schema versions to fetch. + // + // This option will be prioritized over Name. + Root immutable.Option[string] + + // The Name of the schema versions to fetch. 
+ Name immutable.Option[string] + + ExpectedResults []client.SchemaDescription + ExpectedError string } +// SetDefaultSchemaVersion is an action that will set the default schema version to the +// given value. +type SetDefaultSchemaVersion struct { + // NodeID may hold the ID (index) of a node to set the default schema version on. + // + // If a value is not provided the default will be set on all nodes. + NodeID immutable.Option[int] + + SchemaVersionID string + ExpectedError string +} + // CreateDoc will attempt to create the given document in the given collection // using the set [MutationType]. type CreateDoc struct { @@ -238,6 +281,36 @@ type GetIndexes struct { ExpectedError string } +// ResultAsserter is an interface that can be implemented to provide custom result +// assertions. +type ResultAsserter interface { + // Assert will be called with the test and the result of the request. + Assert(t *testing.T, result []map[string]any) +} + +// ResultAsserterFunc is a function that can be used to implement the ResultAsserter interface. +type ResultAsserterFunc func(*testing.T, []map[string]any) (bool, string) + +func (f ResultAsserterFunc) Assert(t *testing.T, result []map[string]any) { + f(t, result) +} + +// Benchmark is an action that will run other test actions as a benchmark. +// It runs a base case and an optimized case and asserts that the optimized +// case performs better by at least the given factor. +type Benchmark struct { + // BaseCase is a test action which is the base case to benchmark. + BaseCase any + // OptimizedCase is a test action which is the optimized case to benchmark. + OptimizedCase any + // Reps is the number of times to run the benchmark. + Reps int + // FocusClients is the list of clients to run the benchmark on. + FocusClients []ClientType + // Factor is the factor by which the optimized case should be better than the base case. + Factor float64 +} + // Request represents a standard Defra (GQL) request. type Request struct { // NodeID may hold the ID (index) of a node to execute this request on. @@ -255,6 +328,9 @@ type Request struct { // The expected (data) results of the issued request. Results []map[string]any + // Asserter is an optional custom result asserter. + Asserter ResultAsserter + // Any error expected from the action. Optional. 
// // String can be a partial, and the test will pass if an error is returned that diff --git a/tests/integration/utils2.go b/tests/integration/utils2.go index 622478f513..a9480c15ec 100644 --- a/tests/integration/utils2.go +++ b/tests/integration/utils2.go @@ -15,14 +15,12 @@ import ( "encoding/json" "fmt" "os" - "path" "reflect" - "strconv" "strings" "testing" "time" - badger "github.com/dgraph-io/badger/v4" + "github.com/libp2p/go-libp2p/core/crypto" "github.com/sourcenetwork/immutable" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -30,48 +28,14 @@ import ( "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/datastore" badgerds "github.com/sourcenetwork/defradb/datastore/badger/v4" - "github.com/sourcenetwork/defradb/datastore/memory" - "github.com/sourcenetwork/defradb/db" "github.com/sourcenetwork/defradb/errors" - "github.com/sourcenetwork/defradb/http" "github.com/sourcenetwork/defradb/logging" "github.com/sourcenetwork/defradb/net" + changeDetector "github.com/sourcenetwork/defradb/tests/change_detector" + "github.com/sourcenetwork/defradb/tests/clients" ) -const ( - clientGoEnvName = "DEFRA_CLIENT_GO" - clientHttpEnvName = "DEFRA_CLIENT_HTTP" - memoryBadgerEnvName = "DEFRA_BADGER_MEMORY" - fileBadgerEnvName = "DEFRA_BADGER_FILE" - fileBadgerPathEnvName = "DEFRA_BADGER_FILE_PATH" - rootDBFilePathEnvName = "DEFRA_TEST_ROOT" - inMemoryEnvName = "DEFRA_IN_MEMORY" - setupOnlyEnvName = "DEFRA_SETUP_ONLY" - detectDbChangesEnvName = "DEFRA_DETECT_DATABASE_CHANGES" - repositoryEnvName = "DEFRA_CODE_REPOSITORY" - targetBranchEnvName = "DEFRA_TARGET_BRANCH" - mutationTypeEnvName = "DEFRA_MUTATION_TYPE" - documentationDirectoryName = "data_format_changes" -) - -type DatabaseType string - -const ( - badgerIMType DatabaseType = "badger-in-memory" - defraIMType DatabaseType = "defra-memory-datastore" - badgerFileType DatabaseType = "badger-file-system" -) - -type ClientType string - -const ( - // goClientType enables running the test suite using - // the go implementation of the client.DB interface. - goClientType ClientType = "go" - // httpClientType enables running the test suite using - // the http implementation of the client.DB interface. - httpClientType ClientType = "http" -) +const mutationTypeEnvName = "DEFRA_MUTATION_TYPE" // The MutationType that tests will run using. // @@ -101,75 +65,21 @@ const ( ) var ( - log = logging.MustNewLogger("tests.integration") - badgerInMemory bool - badgerFile bool - inMemoryStore bool - httpClient bool - goClient bool - mutationType MutationType + log = logging.MustNewLogger("tests.integration") + mutationType MutationType ) -const subscriptionTimeout = 1 * time.Second - -// Instantiating lenses is expensive, and our tests do not benefit from a large number of them, -// so we explicitly set it to a low value. 
-const lensPoolSize = 2 - -var databaseDir string -var rootDatabaseDir string - -/* -If this is set to true the integration test suite will instead of its normal profile do -the following: - -On [package] Init: - - Get the (local) latest commit from the target/parent branch // code assumes - git fetch has been done - - Check to see if a clone of that commit/branch is available in the temp dir, and - if not clone the target branch - - Check to see if there are any new .md files in the current branch's data_format_changes - dir (vs the target branch) - -For each test: - - If new documentation detected, pass the test and exit - - Create a new (test/auto-deleted) temp dir for defra to live/run in - - Run the test setup (add initial schema, docs, updates) using the target branch (test is skipped - if test does not exist in target and is new to this branch) - - Run the test request and assert results (as per normal tests) using the current branch -*/ -var DetectDbChanges bool -var SetupOnly bool - -var detectDbChangesCodeDir string -var areDatabaseFormatChangesDocumented bool -var previousTestCaseTestName string +const ( + // subscriptionTimeout is the maximum time to wait for subscription results to be returned. + subscriptionTimeout = 1 * time.Second + // Instantiating lenses is expensive, and our tests do not benefit from a large number of them, + // so we explicitly set it to a low value. + lensPoolSize = 2 +) func init() { // We use environment variables instead of flags `go test ./...` throws for all packages - // that don't have the flag defined - httpClient, _ = strconv.ParseBool(os.Getenv(clientHttpEnvName)) - goClient, _ = strconv.ParseBool(os.Getenv(clientGoEnvName)) - badgerFile, _ = strconv.ParseBool(os.Getenv(fileBadgerEnvName)) - badgerInMemory, _ = strconv.ParseBool(os.Getenv(memoryBadgerEnvName)) - inMemoryStore, _ = strconv.ParseBool(os.Getenv(inMemoryEnvName)) - DetectDbChanges, _ = strconv.ParseBool(os.Getenv(detectDbChangesEnvName)) - SetupOnly, _ = strconv.ParseBool(os.Getenv(setupOnlyEnvName)) - - var repositoryValue string - if value, ok := os.LookupEnv(repositoryEnvName); ok { - repositoryValue = value - } else { - repositoryValue = "https://github.com/sourcenetwork/defradb.git" - } - - var targetBranchValue string - if value, ok := os.LookupEnv(targetBranchEnvName); ok { - targetBranchValue = value - } else { - targetBranchValue = "develop" - } - + // that don't have the flag defined if value, ok := os.LookupEnv(mutationTypeEnvName); ok { mutationType = MutationType(value) } else { @@ -178,22 +88,6 @@ func init() { // mutation type. mutationType = CollectionSaveMutationType } - - // Default is to test go client type. - if !goClient && !httpClient { - goClient = true - } - - // Default is to test all but filesystem db types. - if !badgerInMemory && !badgerFile && !inMemoryStore && !DetectDbChanges { - badgerFile = false - badgerInMemory = true - inMemoryStore = true - } - - if DetectDbChanges { - detectDbChangesInit(repositoryValue, targetBranchValue) - } } // AssertPanic asserts that the code inside the specified PanicTestFunc panics. @@ -203,116 +97,20 @@ func init() { // // Usage: AssertPanic(t, func() { executeTestCase(t, test) }) func AssertPanic(t *testing.T, f assert.PanicTestFunc) bool { - if IsDetectingDbChanges() { + if changeDetector.Enabled { // The `assert.Panics` call will falsely fail if this test is executed during // a detect changes test run. 
t.Skip("Assert panic with the change detector is not currently supported.") } - if httpClient { - // The http-client will return an error instead of panicing at the moment. + if httpClient || cliClient { + // The http / cli client will return an error instead of panicking at the moment. t.Skip("Assert panic with the http client is not currently supported.") } return assert.Panics(t, f, "expected a panic, but none found.") } -func NewBadgerMemoryDB(ctx context.Context, dbopts ...db.Option) (client.DB, error) { - opts := badgerds.Options{Options: badger.DefaultOptions("").WithInMemory(true)} - rootstore, err := badgerds.NewDatastore("", &opts) - if err != nil { - return nil, err - } - - dbopts = append(dbopts, db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize)) - - db, err := db.NewDB(ctx, rootstore, dbopts...) - if err != nil { - return nil, err - } - - return db, nil -} - -func NewInMemoryDB(ctx context.Context) (client.DB, error) { - rootstore := memory.NewDatastore(ctx) - db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize)) - if err != nil { - return nil, err - } - - return db, nil -} - -func NewBadgerFileDB(ctx context.Context, t testing.TB) (client.DB, string, error) { - var dbPath string - if databaseDir != "" { - dbPath = databaseDir - } else if rootDatabaseDir != "" { - dbPath = path.Join(rootDatabaseDir, t.Name()) - } else { - dbPath = t.TempDir() - } - - db, err := newBadgerFileDB(ctx, t, dbPath) - return db, dbPath, err -} - -func newBadgerFileDB(ctx context.Context, t testing.TB, path string) (client.DB, error) { - opts := badgerds.Options{Options: badger.DefaultOptions(path)} - rootstore, err := badgerds.NewDatastore(path, &opts) - if err != nil { - return nil, err - } - - db, err := db.NewDB(ctx, rootstore, db.WithUpdateEvents(), db.WithLensPoolSize(lensPoolSize)) - if err != nil { - return nil, err - } - - return db, nil -} - -// GetDatabase returns the database implementation for the current -// testing state. The database type and client type on the test state -// are used to select the datastore and client implementation to use. -func GetDatabase(s *state) (cdb client.DB, path string, err error) { - switch s.dbt { - case badgerIMType: - cdb, err = NewBadgerMemoryDB(s.ctx, db.WithUpdateEvents()) - - case badgerFileType: - cdb, path, err = NewBadgerFileDB(s.ctx, s.t) - - case defraIMType: - cdb, err = NewInMemoryDB(s.ctx) - - default: - err = fmt.Errorf("invalid database type: %v", s.dbt) - } - - if err != nil { - return nil, "", err - } - - switch s.clientType { - case httpClientType: - cdb, err = http.NewWrapper(cdb) - - case goClientType: - return - - default: - err = fmt.Errorf("invalid client type: %v", s.dbt) - } - - if err != nil { - return nil, "", err - } - - return -} - // ExecuteTestCase executes the given TestCase against the configured database 
// @@ -322,20 +120,20 @@ func ExecuteTestCase( t *testing.T, testCase TestCase, ) { + flattenActions(&testCase) collectionNames := getCollectionNames(testCase) - - if DetectDbChanges && DetectDbChangesPreTestChecks(t, collectionNames) { - return - } - + changeDetector.PreTestChecks(t, collectionNames) skipIfMutationTypeUnsupported(t, testCase.SupportedMutationTypes) var clients []ClientType if httpClient { - clients = append(clients, httpClientType) + clients = append(clients, HTTPClientType) } if goClient { - clients = append(clients, goClientType) + clients = append(clients, GoClientType) + } + if cliClient { + clients = append(clients, CLIClientType) } var databases []DatabaseType @@ -370,9 +168,20 @@ func executeTestCase( dbt DatabaseType, clientType ClientType, ) { - log.Info(ctx, testCase.Description, logging.NewKV("Database", dbt)) + log.Info( + ctx, + testCase.Description, + logging.NewKV("database", dbt), + logging.NewKV("client", clientType), + logging.NewKV("mutationType", mutationType), + logging.NewKV("databaseDir", databaseDir), + logging.NewKV("changeDetector.Enabled", changeDetector.Enabled), + logging.NewKV("changeDetector.SetupOnly", changeDetector.SetupOnly), + logging.NewKV("changeDetector.SourceBranch", changeDetector.SourceBranch), + logging.NewKV("changeDetector.TargetBranch", changeDetector.TargetBranch), + logging.NewKV("changeDetector.Repository", changeDetector.Repository), + ) - flattenActions(&testCase) startActionIndex, endActionIndex := getActionRange(testCase) s := newState(ctx, t, testCase, dbt, clientType, collectionNames) @@ -390,107 +199,169 @@ func executeTestCase( refreshIndexes(s) for i := startActionIndex; i <= endActionIndex; i++ { - switch action := testCase.Actions[i].(type) { - case ConfigureNode: - configureNode(s, action) + performAction(s, i, testCase.Actions[i]) + } - case Restart: - restartNodes(s, i) + // Notify any active subscriptions that all requests have been sent. + close(s.allActionsDone) - case ConnectPeers: - connectPeers(s, action) + for _, resultsChan := range s.subscriptionResultsChans { + select { + case subscriptionAssert := <-resultsChan: + // We want to assert back in the main thread so failures get recorded properly + subscriptionAssert() - case ConfigureReplicator: - configureReplicator(s, action) + // a safety in case the stream hangs - we don't want the tests to run forever. 
+ case <-time.After(subscriptionTimeout): + assert.Fail(t, "timeout occurred while waiting for data stream", testCase.Description) + } + } +} - case SubscribeToCollection: - subscribeToCollection(s, action) +func performAction( + s *state, + actionIndex int, + act any, +) { + switch action := act.(type) { + case ConfigureNode: + configureNode(s, action) - case UnsubscribeToCollection: - unsubscribeToCollection(s, action) + case Restart: + restartNodes(s, actionIndex) - case GetAllP2PCollections: - getAllP2PCollections(s, action) + case ConnectPeers: + connectPeers(s, action) - case SchemaUpdate: - updateSchema(s, action) + case ConfigureReplicator: + configureReplicator(s, action) - case SchemaPatch: - patchSchema(s, action) + case DeleteReplicator: + deleteReplicator(s, action) - case ConfigureMigration: - configureMigration(s, action) + case SubscribeToCollection: + subscribeToCollection(s, action) - case GetMigrations: - getMigrations(s, action) + case UnsubscribeToCollection: + unsubscribeToCollection(s, action) - case CreateDoc: - createDoc(s, action) + case GetAllP2PCollections: + getAllP2PCollections(s, action) - case DeleteDoc: - deleteDoc(s, action) + case SchemaUpdate: + updateSchema(s, action) - case UpdateDoc: - updateDoc(s, action) + case SchemaPatch: + patchSchema(s, action) - case CreateIndex: - createIndex(s, action) + case GetSchema: + getSchema(s, action) - case DropIndex: - dropIndex(s, action) + case SetDefaultSchemaVersion: + setDefaultSchemaVersion(s, action) - case GetIndexes: - getIndexes(s, action) + case ConfigureMigration: + configureMigration(s, action) - case BackupExport: - backupExport(s, action) + case GetMigrations: + getMigrations(s, action) - case BackupImport: - backupImport(s, action) + case CreateDoc: + createDoc(s, action) - case TransactionCommit: - commitTransaction(s, action) + case DeleteDoc: + deleteDoc(s, action) - case SubscriptionRequest: - executeSubscriptionRequest(s, action) + case UpdateDoc: + updateDoc(s, action) - case Request: - executeRequest(s, action) + case CreateIndex: + createIndex(s, action) - case ExplainRequest: - executeExplainRequest(s, action) + case DropIndex: + dropIndex(s, action) - case IntrospectionRequest: - assertIntrospectionResults(s, action) + case GetIndexes: + getIndexes(s, action) - case ClientIntrospectionRequest: - assertClientIntrospectionResults(s, action) + case BackupExport: + backupExport(s, action) - case WaitForSync: - waitForSync(s, action) + case BackupImport: + backupImport(s, action) - case SetupComplete: - // no-op, just continue. + case TransactionCommit: + commitTransaction(s, action) - default: - t.Fatalf("Unknown action type %T", action) - } - } + case SubscriptionRequest: + executeSubscriptionRequest(s, action) - // Notify any active subscriptions that all requests have been sent. - close(s.allActionsDone) + case Request: + executeRequest(s, action) - for _, resultsChan := range s.subscriptionResultsChans { - select { - case subscriptionAssert := <-resultsChan: - // We want to assert back in the main thread so failures get recorded properly - subscriptionAssert() + case ExplainRequest: + executeExplainRequest(s, action) - // a safety in case the stream hangs - we don't want the tests to run forever. 
- case <-time.After(subscriptionTimeout): - assert.Fail(t, "timeout occurred while waiting for data stream", testCase.Description) + case IntrospectionRequest: + assertIntrospectionResults(s, action) + + case ClientIntrospectionRequest: + assertClientIntrospectionResults(s, action) + + case WaitForSync: + waitForSync(s, action) + + case Benchmark: + benchmarkAction(s, actionIndex, action) + + case SetupComplete: + // no-op, just continue. + + default: + s.t.Fatalf("Unknown action type %T", action) + } +} + +func benchmarkAction( + s *state, + actionIndex int, + bench Benchmark, +) { + if s.dbt == defraIMType { + // Benchmarking is not meaningful for the in-memory test storage. + return + } + if len(bench.FocusClients) > 0 { + isFound := false + for _, clientType := range bench.FocusClients { + if s.clientType == clientType { + isFound = true + break + } + } + if !isFound { + return + } + } + + runBench := func(benchCase any) time.Duration { + startTime := time.Now() + for i := 0; i < bench.Reps; i++ { + performAction(s, actionIndex, benchCase) } + return time.Since(startTime) } + + s.isBench = true + defer func() { s.isBench = false }() + + baseElapsedTime := runBench(bench.BaseCase) + optimizedElapsedTime := runBench(bench.OptimizedCase) + + factoredBaseTime := int64(float64(baseElapsedTime) / bench.Factor) + assert.Greater(s.t, factoredBaseTime, optimizedElapsedTime, + "Optimized case should be faster than the base case by a factor of at least %.2f. Base: %d, Optimized: %d (μs)", + bench.Factor, baseElapsedTime.Microseconds(), optimizedElapsedTime.Microseconds()) } // getCollectionNames gets an ordered, unique set of collection names across all nodes @@ -552,23 +423,19 @@ func closeNodes( s *state, ) { for _, node := range s.nodes { - if node.Peer != nil { - err := node.Close() - require.NoError(s.t, err) - } - node.DB.Close(s.ctx) + node.Close() } } // getNodes gets the set of applicable nodes for the given nodeID. // // If nodeID has a value it will return that node only, otherwise all nodes will be returned. -func getNodes(nodeID immutable.Option[int], nodes []*net.Node) []*net.Node { +func getNodes(nodeID immutable.Option[int], nodes []clients.Client) []clients.Client { if !nodeID.HasValue() { return nodes } - return []*net.Node{nodes[nodeID.Value()]} + return []clients.Client{nodes[nodeID.Value()]} } // getNodeCollections gets the set of applicable collections for the given nodeID. @@ -632,7 +499,7 @@ func getActionRange(testCase TestCase) (int, int) { startIndex := 0 endIndex := len(testCase.Actions) - 1 - if !DetectDbChanges { + if !changeDetector.Enabled { return startIndex, endIndex } @@ -656,7 +523,7 @@ ActionLoop: } } - if SetupOnly { + if changeDetector.SetupOnly { if setupCompleteIndex > -1 { endIndex = setupCompleteIndex } else if firstNonSetupIndex > -1 { @@ -696,12 +563,13 @@ func setStartingNodes( // If nodes have not been explicitly configured via actions, setup a default one. 
if !hasExplicitNode { - db, path, err := GetDatabase(s) + db, path, err := setupDatabase(s) + require.Nil(s.t, err) + + c, err := setupClient(s, &net.Node{DB: db}) require.Nil(s.t, err) - s.nodes = append(s.nodes, &net.Node{ - DB: db, - }) + s.nodes = append(s.nodes, c) s.dbPaths = append(s.dbPaths, path) } } @@ -719,40 +587,42 @@ func restartNodes( for i := len(s.nodes) - 1; i >= 0; i-- { originalPath := databaseDir databaseDir = s.dbPaths[i] - db, _, err := GetDatabase(s) + db, _, err := setupDatabase(s) require.Nil(s.t, err) databaseDir = originalPath if len(s.nodeConfigs) == 0 { // If there are no explicit node configuration actions the node will be // basic (i.e. no P2P stuff) and can be yielded now. - s.nodes[i] = &net.Node{ - DB: db, - } + c, err := setupClient(s, &net.Node{DB: db}) + require.NoError(s.t, err) + s.nodes[i] = c continue } + key := s.nodePrivateKeys[i] cfg := s.nodeConfigs[i] // We need to make sure the node is configured with its old address, otherwise // a new one may be selected and reconnection to it will fail. - cfg.Net.P2PAddress = strings.Split(s.nodeAddresses[i], "/p2p/")[0] + cfg.Net.P2PAddress = s.nodeAddresses[i].Addrs[0].String() + var n *net.Node n, err = net.NewNode( s.ctx, db, net.WithConfig(&cfg), + net.WithPrivateKey(key), ) require.NoError(s.t, err) if err := n.Start(); err != nil { - closeErr := n.Close() - if closeErr != nil { - s.t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) - } + n.Close() require.NoError(s.t, err) } - s.nodes[i] = n + c, err := setupClient(s, n) + require.NoError(s.t, err) + s.nodes[i] = c } // The index of the action after the last wait action before the current restart action. @@ -803,7 +673,7 @@ func refreshCollections( for nodeID, node := range s.nodes { s.collections[nodeID] = make([]client.Collection, len(s.collectionNames)) - allCollections, err := node.DB.GetAllCollections(s.ctx) + allCollections, err := node.GetAllCollections(s.ctx) require.Nil(s.t, err) for i, collectionName := range s.collectionNames { @@ -825,19 +695,17 @@ func configureNode( s *state, action ConfigureNode, ) { - if DetectDbChanges { + if changeDetector.Enabled { // We do not yet support the change detector for tests running across multiple nodes. s.t.SkipNow() return } cfg := action() - // WARNING: This is a horrible hack both deduplicates/randomizes peer IDs - // And affects where libp2p(?) stores some values on the file system, even when using - // an in memory store. - cfg.Datastore.Badger.Path = s.t.TempDir() + db, path, err := setupDatabase(s) // disable change detector, or allow it? + require.NoError(s.t, err) - db, path, err := GetDatabase(s) //disable change dector, or allow it? 
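+ // Editor's sketch of a descriptive comment for the line below: the key pair is generated up front and recorded in the test state so that restartNodes can recreate the node with the same peer identity (via net.WithPrivateKey); otherwise a restarted node could mint a new peer ID, invalidating the addresses previously exchanged with its peers.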
+ privateKey, _, err := crypto.GenerateKeyPair(crypto.Ed25519, 0) require.NoError(s.t, err) var n *net.Node @@ -846,22 +714,24 @@ func configureNode( s.ctx, db, net.WithConfig(&cfg), + net.WithPrivateKey(privateKey), ) require.NoError(s.t, err) + log.Info(s.ctx, "Starting P2P node", logging.NewKV("P2P address", n.PeerInfo())) if err := n.Start(); err != nil { - closeErr := n.Close() - if closeErr != nil { - s.t.Fatal(fmt.Sprintf("unable to start P2P listeners: %v: problem closing node", err), closeErr) - } + n.Close() require.NoError(s.t, err) } - address := fmt.Sprintf("%s/p2p/%s", n.ListenAddrs()[0].String(), n.PeerID()) - s.nodeAddresses = append(s.nodeAddresses, address) + s.nodeAddresses = append(s.nodeAddresses, n.PeerInfo()) s.nodeConfigs = append(s.nodeConfigs, cfg) + s.nodePrivateKeys = append(s.nodePrivateKeys, privateKey) + + c, err := setupClient(s, n) + require.NoError(s.t, err) - s.nodes = append(s.nodes, n) + s.nodes = append(s.nodes, c) s.dbPaths = append(s.dbPaths, path) } @@ -900,6 +770,11 @@ func refreshDocuments( // Just use the collection from the first relevant node, as all will be the same for this // purpose. collection := getNodeCollections(action.NodeID, s.collections)[0][action.CollectionID] + if err := doc.RemapAliasFieldsAndDockey(collection.Schema().Fields); err != nil { + // If an err has been returned, ignore it - it may be expected and if not + // the test will fail later anyway + continue + } // The document may have been mutated by other actions, so to be sure we have the latest // version without having to worry about the individual update mechanics we fetch it. @@ -1043,7 +918,7 @@ func updateSchema( action SchemaUpdate, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - _, err := node.DB.AddSchema(s.ctx, action.Schema) + _, err := node.AddSchema(s.ctx, action.Schema) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -1059,7 +934,14 @@ func patchSchema( action SchemaPatch, ) { for _, node := range getNodes(action.NodeID, s.nodes) { - err := node.DB.PatchSchema(s.ctx, action.Patch) + var setAsDefaultVersion bool + if action.SetAsDefaultVersion.HasValue() { + setAsDefaultVersion = action.SetAsDefaultVersion.Value() + } else { + setAsDefaultVersion = true + } + + err := node.PatchSchema(s.ctx, action.Patch, setAsDefaultVersion) expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) @@ -1070,13 +952,57 @@ func patchSchema( refreshIndexes(s) } +func getSchema( + s *state, + action GetSchema, +) { + for _, node := range getNodes(action.NodeID, s.nodes) { + var results []client.SchemaDescription + var err error + switch { + case action.VersionID.HasValue(): + result, e := node.GetSchemaByVersionID(s.ctx, action.VersionID.Value()) + err = e + results = []client.SchemaDescription{result} + case action.Root.HasValue(): + results, err = node.GetSchemasByRoot(s.ctx, action.Root.Value()) + case action.Name.HasValue(): + results, err = node.GetSchemasByName(s.ctx, action.Name.Value()) + default: + results, err = node.GetAllSchemas(s.ctx) + } + + expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) + + if !expectedErrorRaised { + require.Equal(s.t, 
action.ExpectedResults, results) + } + } +} + +func setDefaultSchemaVersion( + s *state, + action SetDefaultSchemaVersion, +) { + for _, node := range getNodes(action.NodeID, s.nodes) { + err := node.SetDefaultSchemaVersion(s.ctx, action.SchemaVersionID) + expectedErrorRaised := AssertError(s.t, s.testCase.Description, err, action.ExpectedError) + + assertExpectedErrorRaised(s.t, s.testCase.Description, action.ExpectedError, expectedErrorRaised) + } + + refreshCollections(s) + refreshIndexes(s) +} + // createDoc creates a document using the chosen [mutationType] and caches it in the // test state object. func createDoc( s *state, action CreateDoc, ) { - var mutation func(*state, CreateDoc, *net.Node, []client.Collection) (*client.Document, error) + var mutation func(*state, CreateDoc, client.P2P, []client.Collection) (*client.Document, error) switch mutationType { case CollectionSaveMutationType: @@ -1117,7 +1043,7 @@ func createDoc( func createDocViaColSave( s *state, action CreateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) (*client.Document, error) { var err error @@ -1132,7 +1058,7 @@ func createDocViaColSave( func createDocViaColCreate( s *state, action CreateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) (*client.Document, error) { var err error @@ -1147,7 +1073,7 @@ func createDocViaColCreate( func createDocViaGQL( s *state, action CreateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) (*client.Document, error) { collection := collections[action.CollectionID] @@ -1165,7 +1091,7 @@ func createDocViaGQL( escapedJson, ) - db := getStore(s, node.DB, immutable.None[int](), action.ExpectedError) + db := getStore(s, node, immutable.None[int](), action.ExpectedError) result := db.ExecRequest(s.ctx, request) if len(result.GQL.Errors) > 0 { @@ -1217,7 +1143,7 @@ func updateDoc( s *state, action UpdateDoc, ) { - var mutation func(*state, UpdateDoc, *net.Node, []client.Collection) error + var mutation func(*state, UpdateDoc, client.P2P, []client.Collection) error switch mutationType { case CollectionSaveMutationType: @@ -1247,7 +1173,7 @@ func updateDoc( func updateDocViaColSave( s *state, action UpdateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) error { doc := s.documents[action.CollectionID][action.DocID] @@ -1263,7 +1189,7 @@ func updateDocViaColSave( func updateDocViaColUpdate( s *state, action UpdateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) error { doc := s.documents[action.CollectionID][action.DocID] @@ -1279,7 +1205,7 @@ func updateDocViaColUpdate( func updateDocViaGQL( s *state, action UpdateDoc, - node *net.Node, + node client.P2P, collections []client.Collection, ) error { doc := s.documents[action.CollectionID][action.DocID] @@ -1299,7 +1225,7 @@ func updateDocViaGQL( escapedJson, ) - db := getStore(s, node.DB, immutable.None[int](), action.ExpectedError) + db := getStore(s, node, immutable.None[int](), action.ExpectedError) result := db.ExecRequest(s.ctx, request) if len(result.GQL.Errors) > 0 { @@ -1399,7 +1325,7 @@ func backupExport( err := withRetry( actionNodes, nodeID, - func() error { return node.DB.BasicExport(s.ctx, &action.Config) }, + func() error { return node.BasicExport(s.ctx, &action.Config) }, ) expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError) @@ -1429,7 +1355,7 @@ func backupImport( err := withRetry( actionNodes, nodeID, - func() error { return node.DB.BasicImport(s.ctx, 
action.Filepath) }, + func() error { return node.BasicImport(s.ctx, action.Filepath) }, ) expectedErrorRaised = AssertError(s.t, s.testCase.Description, err, action.ExpectedError) } @@ -1444,7 +1370,7 @@ // about this in our tests so we just retry a few times until it works (or the // retry limit is breached - important in case this is a different error) func withRetry( - nodes []*net.Node, + nodes []clients.Client, nodeID int, action func() error, ) error { @@ -1515,7 +1441,7 @@ func executeRequest( ) { var expectedErrorRaised bool for nodeID, node := range getNodes(action.NodeID, s.nodes) { - db := getStore(s, node.DB, action.TransactionID, action.ExpectedError) + db := getStore(s, node, action.TransactionID, action.ExpectedError) result := db.ExecRequest(s.ctx, action.Request) anyOfByFieldKey := map[docFieldKey][]any{} @@ -1524,6 +1450,7 @@ &result.GQL, action.Results, action.ExpectedError, + action.Asserter, nodeID, anyOfByFieldKey, ) @@ -1547,7 +1474,7 @@ func executeSubscriptionRequest( subscriptionAssert := make(chan func()) for _, node := range getNodes(action.NodeID, s.nodes) { - result := node.DB.ExecRequest(s.ctx, action.Request) + result := node.ExecRequest(s.ctx, action.Request) if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) { return } @@ -1589,6 +1516,7 @@ finalResult, action.Results, action.ExpectedError, + nil, // anyof is not yet supported by subscription requests 0, map[docFieldKey][]any{}, @@ -1661,10 +1589,12 @@ func assertRequestResults( result *client.GQLResult, expectedResults []map[string]any, expectedError string, + asserter ResultAsserter, nodeID int, anyOfByField map[docFieldKey][]any, ) bool { - if AssertErrors(s.t, s.testCase.Description, result.Errors, expectedError) { + // We skip result assertions when benchmarking, as benchmark actions do not specify expected results. + if AssertErrors(s.t, s.testCase.Description, result.Errors, expectedError) || s.isBench { return true } @@ -1675,9 +1605,16 @@ // Note: if result.Data == nil this panics (the panic seems useful while testing). 
resultantData := result.Data.([]map[string]any) + if asserter != nil { + asserter.Assert(s.t, resultantData) + return true + } + log.Info(s.ctx, "", logging.NewKV("RequestResults", result.Data)) - require.Equal(s.t, len(expectedResults), len(resultantData), s.testCase.Description) + // compare results + require.Equal(s.t, len(expectedResults), len(resultantData), + s.testCase.Description+" \n(number of results doesn't match)") for docIndex, result := range resultantData { expectedResult := expectedResults[docIndex] @@ -1718,7 +1655,7 @@ func assertIntrospectionResults( action IntrospectionRequest, ) bool { for _, node := range getNodes(action.NodeID, s.nodes) { - result := node.DB.ExecRequest(s.ctx, action.Request) + result := node.ExecRequest(s.ctx, action.Request) if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) { return true @@ -1749,7 +1686,7 @@ func assertClientIntrospectionResults( action ClientIntrospectionRequest, ) bool { for _, node := range getNodes(action.NodeID, s.nodes) { - result := node.DB.ExecRequest(s.ctx, action.Request) + result := node.ExecRequest(s.ctx, action.Request) if AssertErrors(s.t, s.testCase.Description, result.GQL.Errors, action.ExpectedError) { return true diff --git a/tests/lenses/rust_wasm32_copy/src/lib.rs b/tests/lenses/rust_wasm32_copy/src/lib.rs index 2f2b407481..c1084c9a5a 100644 --- a/tests/lenses/rust_wasm32_copy/src/lib.rs +++ b/tests/lenses/rust_wasm32_copy/src/lib.rs @@ -84,8 +84,9 @@ fn try_transform(ptr: *mut u8) -> Result<Option<Vec<u8>>, Box<dyn Error>> { .ok_or(ModuleError::PropertyNotFoundError{requested: params.src.clone()})? .clone(); - input.insert(params.dst, value); + let mut result = input.clone(); + result.insert(params.dst, value); - let result_json = serde_json::to_vec(&input)?; + let result_json = serde_json::to_vec(&result)?; Ok(Some(result_json)) } diff --git a/tools/defradb.containerfile b/tools/defradb.containerfile index c2cbaeb80e..272a7e67ba 100644 --- a/tools/defradb.containerfile +++ b/tools/defradb.containerfile @@ -2,6 +2,13 @@ # An image to run defradb. +# Stage: PLAYGROUND_BUILD +FROM docker.io/node:20 AS PLAYGROUND_BUILD +WORKDIR /repo/ +COPY playground/ ./ +RUN npm install +RUN npm run build + # Stage: BUILD # Several steps are involved to enable caching and because of the behavior of COPY regarding directories. FROM docker.io/golang:1.20 AS BUILD @@ -9,6 +16,8 @@ WORKDIR /repo/ COPY go.mod go.sum Makefile ./ RUN make deps:modules COPY . . +COPY --from=PLAYGROUND_BUILD /repo/dist /repo/playground/dist/ +ENV BUILD_TAGS=playground RUN make build # Stage: RUN diff --git a/tools/goreleaser.containerfile b/tools/goreleaser.containerfile new file mode 100644 index 0000000000..d8a8af4e6b --- /dev/null +++ b/tools/goreleaser.containerfile @@ -0,0 +1,17 @@ +# syntax=docker/dockerfile:1 + +# An image to run defradb. + +# Stage: RUN +FROM debian:bookworm-slim +COPY defradb /defradb + +# Documents which ports are normally used. +# To publish the ports: `docker run -p 9181:9181` ... +EXPOSE 9161 +EXPOSE 9171 +EXPOSE 9181 + +# Default command provided for convenience. +# e.g. 
docker run -p 9181:9181 source/defradb start --url 0.0.0.0:9181 +ENTRYPOINT [ "/defradb" ] diff --git a/version/version.go b/version/version.go index a6fe7ea548..67538d302b 100644 --- a/version/version.go +++ b/version/version.go @@ -17,9 +17,9 @@ import ( "fmt" "strings" - "github.com/sourcenetwork/defradb/api/http" "github.com/sourcenetwork/defradb/client" "github.com/sourcenetwork/defradb/core/net" + "github.com/sourcenetwork/defradb/http" ) const commitHashMaxLength = 8
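For reference, hedged sketches of how the new test actions introduced in tests/integration/test_case.go above might be used. Only types and fields present in the diff are assumed; the schema names, queries, and values are hypothetical, and each fragment would sit inside a TestCase's Actions slice. First, GetSchema, whose selectors are prioritized as documented (VersionID over Root over Name):

testUtils.GetSchema{
	// With only Name set, the schemas matching that name are fetched and
	// compared against ExpectedResults; VersionID or Root would take priority.
	Name:            immutable.Some("Users"),
	ExpectedResults: []client.SchemaDescription{},
}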
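Next, the Asserter hook on Request: ResultAsserterFunc adapts a plain function to the ResultAsserter interface, and when Asserter is set it replaces the usual comparison against Results entirely (assertRequestResults returns right after calling it).

testUtils.Request{
	Request: `query { Users { name } }`,
	Asserter: testUtils.ResultAsserterFunc(func(t *testing.T, result []map[string]any) (bool, string) {
		// Any custom check can be made against the raw result set here.
		require.NotEmpty(t, result)
		return true, ""
	}),
}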
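Finally, the Benchmark action: it runs each wrapped action Reps times and asserts that the optimized case beats the base case by at least Factor; per benchmarkAction above, it is skipped for the in-memory datastore and for clients outside FocusClients.

testUtils.Benchmark{
	BaseCase: testUtils.Request{Request: `query { Users { name } }`},
	// Hypothetical: e.g. the same query after a CreateIndex action.
	OptimizedCase: testUtils.Request{Request: `query { Users { name } }`},
	Reps:          10,
	FocusClients:  []testUtils.ClientType{testUtils.GoClientType},
	Factor:        2,
}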