diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index a0f8319..b61bd7f 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -1,10 +1,10 @@
-name: Go CI
+name: Go Tests
on:
push:
- branches: [ master, dev ]
+ branches:
+ - master
pull_request:
- branches: [ '**' ]
jobs:
build:
@@ -19,6 +19,9 @@ jobs:
with:
go-version-file: ./go.mod
+ - name: Get
+ run: go get ./...
+
- name: Build
run: go build ./...
diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml
new file mode 100644
index 0000000..7e79f3a
--- /dev/null
+++ b/.github/workflows/golangci-lint.yml
@@ -0,0 +1,23 @@
+name: golangci-lint
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+
+permissions:
+ contents: read
+
+jobs:
+ golangci:
+ name: lint
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v4
+ - uses: actions/setup-go@v5
+ with:
+ go-version: stable
+ - name: golangci-lint
+ uses: golangci/golangci-lint-action@v6
+ with:
+ version: v1.60
\ No newline at end of file
diff --git a/.golangci.yml b/.golangci.yml
new file mode 100644
index 0000000..c3c2fc5
--- /dev/null
+++ b/.golangci.yml
@@ -0,0 +1,340 @@
+# This code is licensed under the terms of the MIT license https://opensource.org/license/mit
+# Copyright (c) 2021 Marat Reymers
+
+## Golden config for golangci-lint v1.59.1
+
+run:
+ # Timeout for analysis, e.g. 30s, 5m.
+ # Default: 1m
+ timeout: 3m
+
+
+# This file contains only configs which differ from defaults.
+# All possible options can be found here https://github.com/golangci/golangci-lint/blob/master/.golangci.reference.yml
+linters-settings:
+ cyclop:
+ # The maximal code complexity to report.
+ # Default: 10
+ max-complexity: 30
+ # The maximal average package complexity.
+ # If it's higher than 0.0 (float) the check is enabled
+ # Default: 0.0
+ package-average: 10.0
+
+ errcheck:
+ # Report about not checking of errors in type assertions: `a := b.(MyStruct)`.
+ # Such cases aren't reported by default.
+ # Default: false
+ check-type-assertions: true
+
+ exhaustive:
+ # Program elements to check for exhaustiveness.
+ # Default: [ switch ]
+ check:
+ - switch
+ - map
+
+ exhaustruct:
+ # List of regular expressions to exclude struct packages and their names from checks.
+ # Regular expressions must match complete canonical struct package/name/structname.
+ # Default: []
+ exclude:
+ # std libs
+ - "^net/http.Client$"
+ - "^net/http.Cookie$"
+ - "^net/http.Request$"
+ - "^net/http.Response$"
+ - "^net/http.Server$"
+ - "^net/http.Transport$"
+ - "^net/url.URL$"
+ - "^os/exec.Cmd$"
+ - "^reflect.StructField$"
+ # public libs
+ - "^github.com/Shopify/sarama.Config$"
+ - "^github.com/Shopify/sarama.ProducerMessage$"
+ - "^github.com/mitchellh/mapstructure.DecoderConfig$"
+ - "^github.com/prometheus/client_golang/.+Opts$"
+ - "^github.com/spf13/cobra.Command$"
+ - "^github.com/spf13/cobra.CompletionOptions$"
+ - "^github.com/stretchr/testify/mock.Mock$"
+ - "^github.com/testcontainers/testcontainers-go.+Request$"
+ - "^github.com/testcontainers/testcontainers-go.FromDockerfile$"
+ - "^golang.org/x/tools/go/analysis.Analyzer$"
+ - "^google.golang.org/protobuf/.+Options$"
+ - "^gopkg.in/yaml.v3.Node$"
+
+ funlen:
+ # Checks the number of lines in a function.
+ # If lower than 0, disable the check.
+ # Default: 60
+ lines: 100
+ # Checks the number of statements in a function.
+ # If lower than 0, disable the check.
+ # Default: 40
+ statements: 50
+ # Ignore comments when counting lines.
+ # Default false
+ ignore-comments: true
+
+ gocognit:
+ # Minimal code complexity to report.
+ # Default: 30 (but we recommend 10-20)
+ min-complexity: 20
+
+ gocritic:
+ # Settings passed to gocritic.
+ # The settings key is the name of a supported gocritic checker.
+ # The list of supported checkers can be find in https://go-critic.github.io/overview.
+ settings:
+ captLocal:
+ # Whether to restrict checker to params only.
+ # Default: true
+ paramsOnly: false
+ underef:
+ # Whether to skip (*x).method() calls where x is a pointer receiver.
+ # Default: true
+ skipRecvDeref: false
+
+ gomodguard:
+ blocked:
+ # List of blocked modules.
+ # Default: []
+ modules:
+ - github.com/golang/protobuf:
+ recommendations:
+ - google.golang.org/protobuf
+ reason: "see https://developers.google.com/protocol-buffers/docs/reference/go/faq#modules"
+ - github.com/satori/go.uuid:
+ recommendations:
+ - github.com/google/uuid
+ reason: "satori's package is not maintained"
+ - github.com/gofrs/uuid:
+ recommendations:
+ - github.com/gofrs/uuid/v5
+ reason: "gofrs' package was not go module before v5"
+
+ govet:
+ # Enable all analyzers.
+ # Default: false
+ enable-all: true
+ # Disable analyzers by name.
+ # Run `go tool vet help` to see all analyzers.
+ # Default: []
+ disable:
+ - fieldalignment # too strict
+ - shadow # too noisy
+
+ inamedparam:
+ # Skips check for interface methods with only a single parameter.
+ # Default: false
+ skip-single-param: true
+
+ mnd:
+ # List of function patterns to exclude from analysis.
+ # Values always ignored: `time.Date`,
+ # `strconv.FormatInt`, `strconv.FormatUint`, `strconv.FormatFloat`,
+ # `strconv.ParseInt`, `strconv.ParseUint`, `strconv.ParseFloat`.
+ # Default: []
+ ignored-functions:
+ - args.Error
+ - flag.Arg
+ - flag.Duration.*
+ - flag.Float.*
+ - flag.Int.*
+ - flag.Uint.*
+ - os.Chmod
+ - os.Mkdir.*
+ - os.OpenFile
+ - os.WriteFile
+ - prometheus.ExponentialBuckets.*
+ - prometheus.LinearBuckets
+
+ nakedret:
+ # Make an issue if func has more lines of code than this setting, and it has naked returns.
+ # Default: 30
+ max-func-lines: 0
+
+ nolintlint:
+ # Exclude following linters from requiring an explanation.
+ # Default: []
+ allow-no-explanation: [ funlen, gocognit, lll ]
+ # Enable to require an explanation of nonzero length after each nolint directive.
+ # Default: false
+ require-explanation: true
+ # Enable to require nolint directives to mention the specific linter being suppressed.
+ # Default: false
+ require-specific: true
+
+ perfsprint:
+ # Optimizes into strings concatenation.
+ # Default: true
+ strconcat: false
+
+ rowserrcheck:
+ # database/sql is always checked
+ # Default: []
+ packages:
+ - github.com/jmoiron/sqlx
+
+ sloglint:
+ # Enforce not using global loggers.
+ # Values:
+ # - "": disabled
+ # - "all": report all global loggers
+ # - "default": report only the default slog logger
+ # https://github.com/go-simpler/sloglint?tab=readme-ov-file#no-global
+ # Default: ""
+ no-global: "all"
+ # Enforce using methods that accept a context.
+ # Values:
+ # - "": disabled
+ # - "all": report all contextless calls
+ # - "scope": report only if a context exists in the scope of the outermost function
+ # https://github.com/go-simpler/sloglint?tab=readme-ov-file#context-only
+ # Default: ""
+ context: "scope"
+
+ tenv:
+ # The option `all` will run against whole test files (`_test.go`) regardless of method/function signatures.
+ # Otherwise, only methods that take `*testing.T`, `*testing.B`, and `testing.TB` as arguments are checked.
+ # Default: false
+ all: true
+
+
+linters:
+ disable-all: true
+ enable:
+ ## enabled by default
+ - errcheck # checking for unchecked errors, these unchecked errors can be critical bugs in some cases
+ - gosimple # specializes in simplifying a code
+ - govet # reports suspicious constructs, such as Printf calls whose arguments do not align with the format string
+ - ineffassign # detects when assignments to existing variables are not used
+ - staticcheck # is a go vet on steroids, applying a ton of static analysis checks
+ - typecheck # like the front-end of a Go compiler, parses and type-checks Go code
+ - unused # checks for unused constants, variables, functions and types
+ ## disabled by default
+ - asasalint # checks for pass []any as any in variadic func(...any)
+ - asciicheck # checks that your code does not contain non-ASCII identifiers
+ - bidichk # checks for dangerous unicode character sequences
+ - bodyclose # checks whether HTTP response body is closed successfully
+ - canonicalheader # checks whether net/http.Header uses canonical header
+ - cyclop # checks function and package cyclomatic complexity
+ - dupl # tool for code clone detection
+ - durationcheck # checks for two durations multiplied together
+ - errname # checks that sentinel errors are prefixed with the Err and error types are suffixed with the Error
+ - errorlint # finds code that will cause problems with the error wrapping scheme introduced in Go 1.13
+ - exhaustive # checks exhaustiveness of enum switch statements
+ - fatcontext # detects nested contexts in loops
+ - forbidigo # forbids identifiers
+ - funlen # tool for detection of long functions
+ - gocheckcompilerdirectives # validates go compiler directive comments (//go:)
+ - gochecknoglobals # checks that no global variables exist
+ - gochecknoinits # checks that no init functions are present in Go code
+ - gochecksumtype # checks exhaustiveness on Go "sum types"
+ - gocognit # computes and checks the cognitive complexity of functions
+ - goconst # finds repeated strings that could be replaced by a constant
+ - gocritic # provides diagnostics that check for bugs, performance and style issues
+ - gocyclo # computes and checks the cyclomatic complexity of functions
+ - godot # checks if comments end in a period
+ - goimports # in addition to fixing imports, goimports also formats your code in the same style as gofmt
+ - gomoddirectives # manages the use of 'replace', 'retract', and 'excludes' directives in go.mod
+ - gomodguard # allow and block lists linter for direct Go module dependencies. This is different from depguard where there are different block types for example version constraints and module recommendations
+ - goprintffuncname # checks that printf-like functions are named with f at the end
+ - intrange # finds places where for loops could make use of an integer range
+ - lll # reports long lines
+ - loggercheck # checks key value pairs for common logger libraries (kitlog,klog,logr,zap)
+ - makezero # finds slice declarations with non-zero initial length
+ - mirror # reports wrong mirror patterns of bytes/strings usage
+ - mnd # detects magic numbers
+ - musttag # enforces field tags in (un)marshaled structs
+ - nakedret # finds naked returns in functions greater than a specified function length
+ - nestif # reports deeply nested if statements
+ - nilerr # finds the code that returns nil even if it checks that the error is not nil
+ - nilnil # checks that there is no simultaneous return of nil error and an invalid value
+ - noctx # finds sending http request without context.Context
+ - nolintlint # reports ill-formed or insufficient nolint directives
+ - nonamedreturns # reports all named returns
+ - nosprintfhostport # checks for misuse of Sprintf to construct a host with port in a URL
+ - perfsprint # checks that fmt.Sprintf can be replaced with a faster alternative
+ - predeclared # finds code that shadows one of Go's predeclared identifiers
+ - promlinter # checks Prometheus metrics naming via promlint
+ - protogetter # reports direct reads from proto message fields when getters should be used
+ - reassign # checks that package variables are not reassigned
+ - revive # fast, configurable, extensible, flexible, and beautiful linter for Go, drop-in replacement of golint
+ - rowserrcheck # checks whether Err of rows is checked successfully
+ - sloglint # ensure consistent code style when using log/slog
+ - spancheck # checks for mistakes with OpenTelemetry/Census spans
+ - sqlclosecheck # checks that sql.Rows and sql.Stmt are closed
+ - stylecheck # is a replacement for golint
+ - tenv # detects using os.Setenv instead of t.Setenv since Go1.17
+ - testableexamples # checks if examples are testable (have an expected output)
+ - testifylint # checks usage of github.com/stretchr/testify
+ - testpackage # makes you use a separate _test package
+ - tparallel # detects inappropriate usage of t.Parallel() method in your Go test codes
+ - unconvert # removes unnecessary type conversions
+ - unparam # reports unused function parameters
+ - usestdlibvars # detects the possibility to use variables/constants from the Go standard library
+ - wastedassign # finds wasted assignment statements
+ - whitespace # detects leading and trailing whitespace
+
+ ## you may want to enable
+ #- decorder # checks declaration order and count of types, constants, variables and functions
+ #- exhaustruct # [highly recommend to enable] checks if all structure fields are initialized
+ #- gci # controls golang package import order and makes it always deterministic
+ #- ginkgolinter # [if you use ginkgo/gomega] enforces standards of using ginkgo and gomega
+ #- godox # detects FIXME, TODO and other comment keywords
+ #- goheader # checks is file header matches to pattern
+ #- inamedparam # [great idea, but too strict, need to ignore a lot of cases by default] reports interfaces with unnamed method parameters
+ #- interfacebloat # checks the number of methods inside an interface
+ #- ireturn # accept interfaces, return concrete types
+ #- prealloc # [premature optimization, but can be used in some cases] finds slice declarations that could potentially be preallocated
+ #- tagalign # checks that struct tags are well aligned
+ #- varnamelen # [great idea, but too many false positives] checks that the length of a variable's name matches its scope
+ #- wrapcheck # checks that errors returned from external packages are wrapped
+ #- zerologlint # detects the wrong usage of zerolog that a user forgets to dispatch zerolog.Event
+
+ ## disabled
+ #- containedctx # detects struct contained context.Context field
+ #- contextcheck # [too many false positives] checks the function whether use a non-inherited context
+ #- copyloopvar # [not necessary from Go 1.22] detects places where loop variables are copied
+ #- depguard # [replaced by gomodguard] checks if package imports are in a list of acceptable packages
+ #- dogsled # checks assignments with too many blank identifiers (e.g. x, _, _, _, := f())
+ #- dupword # [useless without config] checks for duplicate words in the source code
+ #- err113 # [too strict] checks the errors handling expressions
+ #- errchkjson # [don't see profit + I'm against of omitting errors like in the first example https://github.com/breml/errchkjson] checks types passed to the json encoding functions. Reports unsupported types and optionally reports occasions, where the check for the returned error can be omitted
+ #- execinquery # [deprecated] checks query string in Query function which reads your Go src files and warning it finds
+ #- exportloopref # [not necessary from Go 1.22] checks for pointers to enclosing loop variables
+ #- forcetypeassert # [replaced by errcheck] finds forced type assertions
+ #- gofmt # [replaced by goimports] checks whether code was gofmt-ed
+ #- gofumpt # [replaced by goimports, gofumports is not available yet] checks whether code was gofumpt-ed
+ #- gosmopolitan # reports certain i18n/l10n anti-patterns in your Go codebase
+ #- grouper # analyzes expression groups
+ #- importas # enforces consistent import aliases
+ #- maintidx # measures the maintainability index of each function
+ #- misspell # [useless] finds commonly misspelled English words in comments
+ #- nlreturn # [too strict and mostly code is not more readable] checks for a new line before return and branch statements to increase code clarity
+ #- paralleltest # [too many false positives] detects missing usage of t.Parallel() method in your Go test
+ #- tagliatelle # checks the struct tags
+ #- thelper # detects golang test helpers without t.Helper() call and checks the consistency of test helpers
+ #- wsl # [too strict and mostly code is not more readable] whitespace linter forces you to use empty lines
+
+
+issues:
+ # Maximum count of issues with the same text.
+ # Set to 0 to disable.
+ # Default: 3
+ max-same-issues: 50
+
+ exclude-rules:
+ - source: "(noinspection|TODO)"
+ linters: [ godot ]
+ - source: "//noinspection"
+ linters: [ gocritic ]
+ - path: "_test\\.go"
+ linters:
+ - bodyclose
+ - dupl
+ - funlen
+ - goconst
+ - noctx
+ - wrapcheck
\ No newline at end of file
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..0dbf61c
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,104 @@
+## Report Issues on GitHub [Issues](https://github.com/qdrant/go-client/issues)
+
+We track public bugs and feature requests using GitHub issues. Please report by [opening a new issue](https://github.com/qdrant/go-client/issues/new).
+
+**Effective Bug Reports** should include:
+
+- A clear summary or background
+- Steps to reproduce the issue
+ - Be as specific as possible
+ - Include sample code when possible
+- What you expect to happen
+- What happened
+- Additional notes (e.g., why you think the issue occurs or solutions you've tried that didn't work)
+
+## Contributing Code
+
+Follow these steps before submitting a pull request:
+
+### Building the Project
+
+```bash
+go build ./...
+```
+
+This will download all dependencies and compile the project.
+
+### Running Tests
+
+All test files are in the `qdrant_test` directory and use [Testcontainers Go](https://golang.testcontainers.org/) for integration tests.
+
+Run the following command to execute the test suites:
+
+```bash
+go test -v ./...
+```
+
+This command pulls a Qdrant Docker image to run integration tests. Ensure Docker is running.
+
+### Formatting and Linting
+
+Ensure your code is free from warnings and follows project standards.
+
+The project uses [Gofmt](https://go.dev/blog/gofmt) for formatting and [golangci-lint](https://github.com/golangci/golangci-lint) for linting.
+
+To format your code:
+
+```bash
+gofmt -s -w .
+```
+
+To lint your code:
+
+```bash
+golangci-lint run
+```
+
+### Preparing for a New Release
+
+The client uses generated stubs from upstream Qdrant proto definitions, which are downloaded from [qdrant/qdrant](https://github.com/qdrant/qdrant/tree/master/lib/api/src/grpc/proto).
+
+#### Steps:
+
+1. Download and generate the latest client stubs by running the following command from the project root:
+
+```bash
+BRANCH=dev sh tools/sync_proto.sh
+```
+
+2. Update the test image value in [`qdrant_test/image_test.go`](https://github.com/qdrant/go-client/blob/new-client/qdrant_test/image_test.go) to `qdrant/qdrant:dev`.
+
+3. Remove the gRPC server definitions from the auto-generated code.
+
+There is currently [no way](https://github.com/golang/protobuf/issues/373) to skip generating Go server definitions.
+
+You'll need to manually delete them from [`snapshots_service_grpc.pb.go`](https://github.com/qdrant/go-client/blob/new-client/qdrant/snapshots_service_grpc.pb.go), [`points_service_grpc.pb.go`](https://github.com/qdrant/go-client/blob/new-client/qdrant/points_service_grpc.pb.go), and [`collections_service_grpc.pb.go`](https://github.com/qdrant/go-client/blob/new-client/qdrant/collections_service_grpc.pb.go).
+
+Remove lines starting from comments like `// CollectionsServer is the server API for Collections service.` until the end of the file. [Here's an example commit](https://github.com/qdrant/go-client/commit/6d04e31bb2acccf54f964a634df8930533642892).
+
+4. Implement new Qdrant methods in [`points.go`](https://github.com/qdrant/go-client/blob/new-client/qdrant/points.go), [`collections.go`](https://github.com/qdrant/go-client/blob/new-client/qdrant/collections.go), or [`qdrant.go`](https://github.com/qdrant/go-client/blob/new-client/qdrant/qdrant.go) as needed.
+
+5. If there are any new `oneOf` properties in the proto definitions, add helper constructors in [`oneof_factory.go`](https://github.com/qdrant/go-client/blob/new-client/qdrant/oneof_factory.go) following the existing patterns.
+
+6. Submit your pull request and get those approvals.
+
+### Releasing a New Version
+
+Once the new Qdrant version is live:
+
+1. Run the following command:
+
+```bash
+BRANCH=master sh tools/sync_proto.sh
+```
+
+2. Update the test image value in `qdrant_test/image_test.go` to `qdrant/qdrant:NEW_VERSION`.
+
+3. Merge the pull request.
+
+4. Push a new Git tag to publish the version:
+
+```bash
+git tag v1.11.0
+git push --tags
+```
diff --git a/README.md b/README.md
index 0c4dc80..58267b9 100644
--- a/README.md
+++ b/README.md
@@ -1,75 +1,135 @@
-# Golang Qdrant client
+
+
+
-Go client for Qdrant vector search engine
+
+ Go client for the Qdrant vector search engine.
+
-## Install
+
+
+
+
+
+
+
+
+Go client library with handy utilities for interfacing with [Qdrant](https://qdrant.tech/).
+
+## π₯ Installation
```bash
-go get github.com/qdrant/go-client
+go get -u github.com/qdrant/go-client
```
-## Usage
+## π Documentation
-Run Qdrant with enabled gRPC interface:
+- Usage examples are available throughout the [Qdrant documentation](https://qdrant.tech/documentation/quick-start/) and [API Reference](https://api.qdrant.tech/).
+- [Godoc Reference](https://pkg.go.dev/github.com/qdrant/go-client)
-```bash
-# With env variable
-docker run -p 6333:6333 -p 6334:6334 \
- -e QDRANT__SERVICE__GRPC_PORT="6334" \
- qdrant/qdrant
-```
+## π Getting started
+
+### Creating a client
+
+A client can be instantiated with
-Or by updating the configuration file:
+```go
+import "github.com/qdrant/go-client/qdrant"
-```yaml
-service:
- grpc_port: 6334
+client, err := qdrant.NewClient(&qdrant.Config{
+ Host: "localhost",
+ Port: 6334,
+})
```
-More info about gRPC in [documentation](https://qdrant.tech/documentation/quick-start/#grpc).
+Which creates a client that will connect to Qdrant running on `localhost:6334`.
-### Making requests
+Internally, the high-level client uses a low-level gRPC client to interact with
+Qdrant. `qdrant.Config` provides additional options to control how the gRPC
+client is configured. The following example configures API key authentication with TLS:
```go
-package main
+import "github.com/qdrant/go-client/qdrant"
+
+client, err := qdrant.NewClient(&qdrant.Config{
+ Host: "xyz-example.eu-central.aws.cloud.qdrant.io",
+ Port: 6334,
+ APIKey: "",
+ UseTLS: true, // uses default config with minimum TLS version set to 1.3
+ // TLSConfig: &tls.Config{...},
+ // GrpcOptions: []grpc.DialOption{},
+})
+```
+
+### Working with collections
+Once a client has been created, create a new collection
+
+```go
import (
"context"
- "flag"
- "log"
- "time"
- pb "github.com/qdrant/go-client/qdrant"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
+ "github.com/qdrant/go-client/qdrant"
)
-var (
- addr = flag.String("addr", "localhost:6334", "the address to connect to")
-)
+client.CreateCollection(context.Background(), &qdrant.CreateCollection{
+ CollectionName: "{collection_name}",
+ VectorsConfig: qdrant.NewVectorsConfig(&qdrant.VectorParams{
+ Size: 4,
+ Distance: qdrant.Distance_Cosine,
+ }),
+})
+```
+
+Insert vectors into the collection
+
+```go
+operationInfo, err := client.Upsert(context.Background(), &qdrant.UpsertPoints{
+ CollectionName: "{collection_name}",
+ Points: []*qdrant.PointStruct{
+ {
+ Id: qdrant.NewIDNum(1),
+ Vectors: qdrant.NewVectors(0.05, 0.61, 0.76, 0.74),
+ Payload: qdrant.NewValueMap(map[string]any{"city": "London"}),
+ },
+ {
+ Id: qdrant.NewIDNum(2),
+ Vectors: qdrant.NewVectors(0.19, 0.81, 0.75, 0.11),
+ Payload: qdrant.NewValueMap(map[string]any{"age": 32}),
+ },
+ {
+ Id: qdrant.NewIDNum(3),
+ Vectors: qdrant.NewVectors(0.36, 0.55, 0.47, 0.94),
+ Payload: qdrant.NewValueMap(map[string]any{"vegan": true}),
+ },
+ },
+})
+```
+
+Search for similar vectors
-func main() {
- flag.Parse()
- // Set up a connection to the server.
- conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
- if err != nil {
- log.Fatalf("did not connect: %v", err)
- }
- defer conn.Close()
-
- collections_client := pb.NewCollectionsClient(conn)
-
- // Contact the server and print out its response.
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
- r, err := collections_client.List(ctx, &pb.ListCollectionsRequest{})
- if err != nil {
- log.Fatalf("could not get collections: %v", err)
- }
- log.Printf("List of collections: %s", r.GetCollections())
-}
+```go
+searchResult, err := client.Query(context.Background(), &qdrant.QueryPoints{
+ CollectionName: "{collection_name}",
+ Query: qdrant.NewQuery(0.2, 0.1, 0.9, 0.7),
+})
+```
+
+Search for similar vectors with filtering condition
+
+```go
+searchResult, err := client.Query(context.Background(), &qdrant.QueryPoints{
+ CollectionName: "test_collection",
+ Query: qdrant.NewQuery(0.2, 0.1, 0.9, 0.7),
+ Filter: &qdrant.Filter{
+ Must: []*qdrant.Condition{
+ qdrant.NewMatch("city", "London"),
+ },
+ },
+ WithPayload: qdrant.NewWithPayload(true),
+})
```
-> For authenticated request (using API KEY and TLS) to Qdrant Cloud, please refer to the [authenticated](https://github.com/qdrant/go-client/tree/master/examples/authentication/main.go) example.
+## βοΈ LICENSE
-A full example for uploading, searching and filtering can be found in the [`examples`](https://github.com/qdrant/go-client/tree/master/examples) directory.
+Apache 2.0 Β© [2024](https://github.com/qdrant/go-client/blob/master/LICENSE)
diff --git a/examples/authentication/main.go b/examples/authentication/main.go
index b927492..a1311a3 100644
--- a/examples/authentication/main.go
+++ b/examples/authentication/main.go
@@ -2,45 +2,29 @@ package main
import (
"context"
- "crypto/tls"
- "flag"
"log"
- "time"
- "google.golang.org/grpc/credentials"
- "google.golang.org/grpc/metadata"
-
- pb "github.com/qdrant/go-client/qdrant"
- "google.golang.org/grpc"
-)
-
-var (
- addr = flag.String("addr", "secure.cloud.qdrant.io:6334", "the address to connect to")
+ "github.com/qdrant/go-client/qdrant"
)
func main() {
- flag.Parse()
- // Set up a connection to the server.
- config := &tls.Config{}
- conn, err := grpc.Dial(*addr, grpc.WithTransportCredentials(credentials.NewTLS(config)), grpc.WithUnaryInterceptor(interceptor))
+ // Create new client
+ client, err := qdrant.NewClient(&qdrant.Config{
+ Host: "xyz-example.eu-central.aws.cloud.qdrant.io",
+ Port: 6334,
+ APIKey: "",
+ UseTLS: true,
+ // TLSConfig: &tls.Config{...},
+ // GrpcOptions: []grpc.DialOption{},
+ })
if err != nil {
- log.Fatalf("did not connect: %v", err)
+ log.Fatalf("could not instantiate: %v", err)
}
- defer conn.Close()
-
- collections_client := pb.NewCollectionsClient(conn)
-
- // Contact the server and print out its response.
- ctx, cancel := context.WithTimeout(context.Background(), time.Second)
- defer cancel()
- r, err := collections_client.List(ctx, &pb.ListCollectionsRequest{})
+ defer client.Close()
+ // List collections
+ collections, err := client.ListCollections(context.Background())
if err != nil {
log.Fatalf("could not get collections: %v", err)
}
- log.Printf("List of collections: %s", r.GetCollections())
-}
-
-func interceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
- newCtx := metadata.AppendToOutgoingContext(ctx, "api-key", "secret-key-*******")
- return invoker(newCtx, method, req, reply, cc, opts...)
+ log.Printf("List of collections: %v", collections)
}
diff --git a/examples/main.go b/examples/main.go
index 7bd64da..e6f92fa 100644
--- a/examples/main.go
+++ b/examples/main.go
@@ -2,68 +2,56 @@ package main
import (
"context"
- "flag"
"log"
"time"
- pb "github.com/qdrant/go-client/qdrant"
- "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
+ "github.com/qdrant/go-client/qdrant"
)
var (
- addr = flag.String("addr", "localhost:6334", "the address to connect to")
- collectionName = "test_collection"
- vectorSize uint64 = 4
- distance = pb.Distance_Dot
+ collectionName = "test_collection"
+ vectorSize uint64 = 4
+ distance = qdrant.Distance_Dot
+ defaultSegmentNumber uint64 = 2
)
func main() {
- flag.Parse()
- // Set up a connection to the server.
- conn, err := grpc.DialContext(context.Background(), *addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
+ // Create new client
+ client, err := qdrant.NewClient(&qdrant.Config{
+ Host: "localhost", // Can be omitted, default is "localhost"
+ Port: 6334, // Can be omitted, default is 6334
+ // APIKey: "",
+ // UseTLS: true,
+ // TLSConfig: &tls.Config{},
+ // GrpcOptions: []grpc.DialOption{},
+ })
if err != nil {
- log.Fatalf("Failed to connect: %v", err)
+ panic(err)
}
- defer conn.Close()
-
- // create grpc collection client
- collections_client := pb.NewCollectionsClient(conn)
-
- // Contact the server and print out its response.
+ defer client.Close()
+ // Get a context for a minute
ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
-
- // Check Qdrant version
- qdrantClient := pb.NewQdrantClient(conn)
- healthCheckResult, err := qdrantClient.HealthCheck(ctx, &pb.HealthCheckRequest{})
+ // Execute health check
+ healthCheckResult, err := client.HealthCheck(ctx)
if err != nil {
log.Fatalf("Could not get health: %v", err)
- } else {
- log.Printf("Qdrant version: %s", healthCheckResult.GetVersion())
}
-
+ log.Printf("Qdrant version: %s", healthCheckResult.GetVersion())
// Delete collection
- _, err = collections_client.Delete(ctx, &pb.DeleteCollection{
- CollectionName: collectionName,
- })
+ err = client.DeleteCollection(ctx, collectionName)
if err != nil {
log.Fatalf("Could not delete collection: %v", err)
- } else {
- log.Println("Collection", collectionName, "deleted")
}
-
- // Create new collection
- var defaultSegmentNumber uint64 = 2
- _, err = collections_client.Create(ctx, &pb.CreateCollection{
+ log.Println("Collection", collectionName, "deleted")
+ // Create collection
+ err = client.CreateCollection(ctx, &qdrant.CreateCollection{
CollectionName: collectionName,
- VectorsConfig: &pb.VectorsConfig{Config: &pb.VectorsConfig_Params{
- Params: &pb.VectorParams{
- Size: vectorSize,
- Distance: distance,
- },
- }},
- OptimizersConfig: &pb.OptimizersConfigDiff{
+ VectorsConfig: qdrant.NewVectorsConfig(&qdrant.VectorParams{
+ Size: vectorSize,
+ Distance: distance,
+ }),
+ OptimizersConfig: &qdrant.OptimizersConfigDiff{
DefaultSegmentNumber: &defaultSegmentNumber,
},
})
@@ -72,235 +60,95 @@ func main() {
} else {
log.Println("Collection", collectionName, "created")
}
-
- // List all created collections
- r, err := collections_client.List(ctx, &pb.ListCollectionsRequest{})
- if err != nil {
- log.Fatalf("Could not get collections: %v", err)
- } else {
- log.Printf("List of collections: %s", r.GetCollections())
- }
-
- // Create points grpc client
- pointsClient := pb.NewPointsClient(conn)
-
- // Create keyword field index
- fieldIndex1Type := pb.FieldType_FieldTypeKeyword
- fieldIndex1Name := "city"
- _, err = pointsClient.CreateFieldIndex(ctx, &pb.CreateFieldIndexCollection{
- CollectionName: collectionName,
- FieldName: fieldIndex1Name,
- FieldType: &fieldIndex1Type,
- })
+ // List collections
+ collections, err := client.ListCollections(ctx)
if err != nil {
- log.Fatalf("Could not create field index: %v", err)
+ log.Fatalf("Could not list collections: %v", err)
} else {
- log.Println("Field index for field", fieldIndex1Name, "created")
+ log.Printf("List of collections: %s", &collections)
}
-
- // Create integer field index
- fieldIndex2Type := pb.FieldType_FieldTypeInteger
- fieldIndex2Name := "count"
- _, err = pointsClient.CreateFieldIndex(ctx, &pb.CreateFieldIndexCollection{
- CollectionName: collectionName,
- FieldName: fieldIndex2Name,
- FieldType: &fieldIndex2Type,
- })
- if err != nil {
- log.Fatalf("Could not create field index: %v", err)
- } else {
- log.Println("Field index for field", fieldIndex2Name, "created")
- }
-
- // Upsert points
+ // Upsert some data
waitUpsert := true
- upsertPoints := []*pb.PointStruct{
+ upsertPoints := []*qdrant.PointStruct{
{
- // Point Id is number or UUID
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 1},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.05, 0.61, 0.76, 0.74}}}},
- Payload: map[string]*pb.Value{
- "city": {
- Kind: &pb.Value_StringValue{StringValue: "Berlin"},
- },
- "country": {
- Kind: &pb.Value_StringValue{StringValue: "Germany"},
- },
- "count": {
- Kind: &pb.Value_IntegerValue{IntegerValue: 1000000},
- },
- "square": {
- Kind: &pb.Value_DoubleValue{DoubleValue: 12.5},
- },
- },
+ Id: qdrant.NewIDNum(1),
+ Vectors: qdrant.NewVectors(0.05, 0.61, 0.76, 0.74),
+ Payload: qdrant.NewValueMap(map[string]any{
+ "city": "Berlin",
+ "country": "Germany",
+ "count": 1000000,
+ "square": 12.5,
+ }),
},
{
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 2},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.19, 0.81, 0.75, 0.11}}}},
- Payload: map[string]*pb.Value{
- "city": {
- Kind: &pb.Value_ListValue{
- ListValue: &pb.ListValue{
- Values: []*pb.Value{
- {
- Kind: &pb.Value_StringValue{StringValue: "Berlin"},
- },
- {
- Kind: &pb.Value_StringValue{StringValue: "London"},
- },
- },
- },
- },
- },
- },
+ Id: qdrant.NewIDNum(2),
+ Vectors: qdrant.NewVectors(0.19, 0.81, 0.75, 0.11),
+ Payload: qdrant.NewValueMap(map[string]any{
+ "city": "Berlin",
+ "country": "London",
+ }),
},
{
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 3},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.36, 0.55, 0.47, 0.94}}}},
- Payload: map[string]*pb.Value{
- "city": {
- Kind: &pb.Value_ListValue{
- ListValue: &pb.ListValue{
- Values: []*pb.Value{
- {
- Kind: &pb.Value_StringValue{StringValue: "Berlin"},
- },
- {
- Kind: &pb.Value_StringValue{StringValue: "Moscow"},
- },
- },
- },
- },
- },
- },
+ Id: qdrant.NewIDNum(3),
+ Vectors: qdrant.NewVectors(0.36, 0.55, 0.47, 0.94),
+ Payload: qdrant.NewValueMap(map[string]any{
+ "city": []any{"Berlin", "London"},
+ }),
},
{
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 4},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.18, 0.01, 0.85, 0.80}}}},
- Payload: map[string]*pb.Value{
- "city": {
- Kind: &pb.Value_ListValue{
- ListValue: &pb.ListValue{
- Values: []*pb.Value{
- {
- Kind: &pb.Value_StringValue{StringValue: "London"},
- },
- {
- Kind: &pb.Value_StringValue{StringValue: "Moscow"},
- },
- },
- },
- },
- },
- },
- },
- {
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 5},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.24, 0.18, 0.22, 0.44}}}},
- Payload: map[string]*pb.Value{
- "count": {
- Kind: &pb.Value_ListValue{
- ListValue: &pb.ListValue{
- Values: []*pb.Value{
- {
- Kind: &pb.Value_IntegerValue{IntegerValue: 0},
- },
- },
- },
- },
- },
- },
- },
- {
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 6},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.35, 0.08, 0.11, 0.44}}}},
- Payload: map[string]*pb.Value{},
- },
- {
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Uuid{Uuid: "58384991-3295-4e21-b711-fd3b94fa73e3"},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.35, 0.08, 0.11, 0.44}}}},
- Payload: map[string]*pb.Value{},
+ Id: qdrant.NewID("58384991-3295-4e21-b711-fd3b94fa73e3"),
+ Vectors: qdrant.NewVectors(0.35, 0.08, 0.11, 0.44),
+ Payload: qdrant.NewValueMap(map[string]any{
+ "bool": true,
+ "list": []any{true, 1, "string"},
+ "count": 1000000,
+ "square": 12.5,
+ }),
},
}
- _, err = pointsClient.Upsert(ctx, &pb.UpsertPoints{
+ _, err = client.Upsert(ctx, &qdrant.UpsertPoints{
CollectionName: collectionName,
Wait: &waitUpsert,
Points: upsertPoints,
})
if err != nil {
log.Fatalf("Could not upsert points: %v", err)
- } else {
- log.Println("Upsert", len(upsertPoints), "points")
}
-
- // Retrieve points by ids
- pointsById, err := pointsClient.Get(ctx, &pb.GetPoints{
+ log.Println("Upsert", len(upsertPoints), "points")
+ // Get points
+ points, err := client.Get(ctx, &qdrant.GetPoints{
CollectionName: collectionName,
- Ids: []*pb.PointId{
- {PointIdOptions: &pb.PointId_Num{Num: 1}},
- {PointIdOptions: &pb.PointId_Num{Num: 2}},
+ Ids: []*qdrant.PointId{
+ qdrant.NewIDNum(1),
+ qdrant.NewIDNum(2),
},
})
if err != nil {
log.Fatalf("Could not retrieve points: %v", err)
- } else {
- log.Printf("Retrieved points: %s", pointsById.GetResult())
}
-
- // Unfiltered search
- unfilteredSearchResult, err := pointsClient.Search(ctx, &pb.SearchPoints{
+ log.Printf("Retrieved points: %s", points)
+ // Query the database
+ searchedPoints, err := client.Query(ctx, &qdrant.QueryPoints{
CollectionName: collectionName,
- Vector: []float32{0.2, 0.1, 0.9, 0.7},
- Limit: 3,
- // Include all payload and vectors in the search result
- WithVectors: &pb.WithVectorsSelector{SelectorOptions: &pb.WithVectorsSelector_Enable{Enable: true}},
- WithPayload: &pb.WithPayloadSelector{SelectorOptions: &pb.WithPayloadSelector_Enable{Enable: true}},
+ Query: qdrant.NewQuery(0.2, 0.1, 0.9, 0.7),
+ WithPayload: qdrant.NewWithPayloadInclude("city"),
})
if err != nil {
log.Fatalf("Could not search points: %v", err)
- } else {
- log.Printf("Found points: %s", unfilteredSearchResult.GetResult())
}
-
- // filtered search
- filteredSearchResult, err := pointsClient.Search(ctx, &pb.SearchPoints{
+ log.Printf("Found points: %s", searchedPoints)
+ // Query again (with filter)
+ filteredPoints, err := client.Query(ctx, &qdrant.QueryPoints{
CollectionName: collectionName,
- Vector: []float32{0.2, 0.1, 0.9, 0.7},
- Limit: 3,
- Filter: &pb.Filter{
- Should: []*pb.Condition{
- {
- ConditionOneOf: &pb.Condition_Field{
- Field: &pb.FieldCondition{
- Key: "city",
- Match: &pb.Match{
- MatchValue: &pb.Match_Keyword{
- Keyword: "London",
- },
- },
- },
- },
- },
+ Query: qdrant.NewQuery(0.2, 0.1, 0.9, 0.7),
+ Filter: &qdrant.Filter{
+ Should: []*qdrant.Condition{
+ qdrant.NewMatchKeyword("city", "Berlin"),
},
},
})
if err != nil {
log.Fatalf("Could not search points: %v", err)
- } else {
- log.Printf("Found points: %s", filteredSearchResult.GetResult())
}
+ log.Printf("Found points: %s", filteredPoints)
}
diff --git a/go.mod b/go.mod
index 75414c3..5d2574e 100644
--- a/go.mod
+++ b/go.mod
@@ -1,36 +1,40 @@
module github.com/qdrant/go-client
-go 1.21
+go 1.22.2
require (
- github.com/testcontainers/testcontainers-go/modules/qdrant v0.29.1
- google.golang.org/grpc v1.62.1
- google.golang.org/protobuf v1.33.0
+ github.com/stretchr/testify v1.9.0
+ github.com/testcontainers/testcontainers-go v0.33.0
+ google.golang.org/grpc v1.66.0
+ google.golang.org/protobuf v1.34.2
)
require (
dario.cat/mergo v1.0.0 // indirect
- github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
- github.com/Microsoft/go-winio v0.6.1 // indirect
- github.com/Microsoft/hcsshim v0.12.0 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/cenkalti/backoff/v4 v4.2.1 // indirect
- github.com/containerd/containerd v1.7.14 // indirect
+ github.com/containerd/containerd v1.7.18 // indirect
github.com/containerd/log v0.1.0 // indirect
+ github.com/containerd/platforms v0.2.1 // indirect
github.com/cpuguy83/dockercfg v0.3.1 // indirect
- github.com/distribution/reference v0.5.0 // indirect
- github.com/docker/docker v25.0.4+incompatible // indirect
+ github.com/creack/pty v1.1.21 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/distribution/reference v0.6.0 // indirect
+ github.com/docker/docker v27.1.1+incompatible // indirect
github.com/docker/go-connections v0.5.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
- github.com/go-ole/go-ole v1.3.0 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
- github.com/golang/protobuf v1.5.4 // indirect
github.com/google/uuid v1.6.0 // indirect
- github.com/klauspost/compress v1.17.7 // indirect
- github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a // indirect
+ github.com/klauspost/compress v1.17.4 // indirect
+ github.com/kr/pretty v0.3.1 // indirect
+ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
+ github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/sys/sequential v0.5.0 // indirect
github.com/moby/sys/user v0.1.0 // indirect
@@ -39,23 +43,26 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
- github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect
- github.com/shirou/gopsutil/v3 v3.24.2 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+ github.com/rogpeppe/go-internal v1.10.0 // indirect
+ github.com/shirou/gopsutil/v3 v3.23.12 // indirect
github.com/shoenig/go-m1cpu v0.1.6 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
- github.com/testcontainers/testcontainers-go v0.29.1 // indirect
- github.com/tklauser/go-sysconf v0.3.13 // indirect
- github.com/tklauser/numcpus v0.7.0 // indirect
- github.com/yusufpapurcu/wmi v1.2.4 // indirect
+ github.com/tklauser/go-sysconf v0.3.12 // indirect
+ github.com/tklauser/numcpus v0.6.1 // indirect
+ github.com/yusufpapurcu/wmi v1.2.3 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
go.opentelemetry.io/otel v1.24.0 // indirect
+ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
go.opentelemetry.io/otel/metric v1.24.0 // indirect
+ go.opentelemetry.io/otel/sdk v1.24.0 // indirect
go.opentelemetry.io/otel/trace v1.24.0 // indirect
- golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 // indirect
- golang.org/x/mod v0.16.0 // indirect
- golang.org/x/net v0.22.0 // indirect
- golang.org/x/sys v0.18.0 // indirect
- golang.org/x/text v0.14.0 // indirect
- golang.org/x/tools v0.19.0 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7 // indirect
+ golang.org/x/crypto v0.26.0 // indirect
+ golang.org/x/net v0.28.0 // indirect
+ golang.org/x/sys v0.24.0 // indirect
+ golang.org/x/text v0.17.0 // indirect
+ golang.org/x/time v0.3.0 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
)
diff --git a/go.sum b/go.sum
index 01f3733..985a296 100644
--- a/go.sum
+++ b/go.sum
@@ -2,29 +2,30 @@ dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 h1:L/gRVlceqvL25UVaW/CKtUDjefjrs0SPonmDGUVOYP0=
-github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow=
-github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM=
-github.com/Microsoft/hcsshim v0.12.0 h1:rbICA+XZFwrBef2Odk++0LjFvClNCJGRK+fsrP254Ts=
-github.com/Microsoft/hcsshim v0.12.0/go.mod h1:RZV12pcHCXQ42XnlQ3pz6FZfmrC1C+R4gaOHhRNML1g=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
-github.com/containerd/containerd v1.7.14 h1:H/XLzbnGuenZEGK+v0RkwTdv2u1QFAruMe5N0GNPJwA=
-github.com/containerd/containerd v1.7.14/go.mod h1:YMC9Qt5yzNqXx/fO4j/5yYVIHXSRrlB3H7sxkUTvspg=
+github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao=
+github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
+github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
github.com/cpuguy83/dockercfg v0.3.1 h1:/FpZ+JaygUR/lZP2NlFI2DVfrOEMAIKP5wWEJdoYe9E=
github.com/cpuguy83/dockercfg v0.3.1/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
-github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
-github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.21 h1:1/QdRyBaHHJP61QkWMXlOIBfsgdDeeKfK8SYVUWJKf0=
+github.com/creack/pty v1.1.21/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
-github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
-github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/docker v25.0.4+incompatible h1:XITZTrq+52tZyZxUOtFIahUf3aH367FLxJzt9vZeAF8=
-github.com/docker/docker v25.0.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
+github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -36,13 +37,10 @@ github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
-github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
-github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
-github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
-github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
@@ -53,13 +51,18 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rH
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.17.7 h1:ehO88t2UGzQK66LMdE8tibEd1ErmzZjNEqWkjLAKQQg=
-github.com/klauspost/compress v1.17.7/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
+github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
+github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
+github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
-github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a h1:3Bm7EwfUQUvhNeKIkUct/gl9eod1TcXuj8stxvi/GoI=
-github.com/lufia/plan9stats v0.0.0-20240226150601-1dcf7310316a/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
@@ -74,15 +77,18 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU=
-github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
-github.com/shirou/gopsutil/v3 v3.24.2 h1:kcR0erMbLg5/3LcInpw0X/rrPSqq4CDPyI6A6ZRC18Y=
-github.com/shirou/gopsutil/v3 v3.24.2/go.mod h1:tSg/594BcA+8UdQU2XcW803GWYgdtauFFPgJCJKZlVk=
+github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
+github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
+github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
+github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
+github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
@@ -98,32 +104,28 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
-github.com/testcontainers/testcontainers-go v0.29.1 h1:z8kxdFlovA2y97RWx98v/TQ+tR+SXZm6p35M+xB92zk=
-github.com/testcontainers/testcontainers-go v0.29.1/go.mod h1:SnKnKQav8UcgtKqjp/AD8bE1MqZm+3TDb/B8crE3XnI=
-github.com/testcontainers/testcontainers-go/modules/qdrant v0.29.1 h1:8Bu6UgUoIzl3gBXHdfiVrfH3fOXbZbU8TjaFVzvH524=
-github.com/testcontainers/testcontainers-go/modules/qdrant v0.29.1/go.mod h1:e/Xu0sSGSeNN6aPMPWY9hhYTjrBHJHetUI0TZPd9L6g=
+github.com/testcontainers/testcontainers-go v0.33.0 h1:zJS9PfXYT5O0ZFXM2xxXfk4J5UMw/kRiISng037Gxdw=
+github.com/testcontainers/testcontainers-go v0.33.0/go.mod h1:W80YpTa8D5C3Yy16icheD01UTDu+LmXIA2Keo+jWtT8=
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
-github.com/tklauser/go-sysconf v0.3.13 h1:GBUpcahXSpR2xN01jhkNAbTLRk2Yzgggk8IM08lq3r4=
-github.com/tklauser/go-sysconf v0.3.13/go.mod h1:zwleP4Q4OehZHGn4CYZDipCgg9usW5IJePewFCGVEa0=
+github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
-github.com/tklauser/numcpus v0.7.0 h1:yjuerZP127QG9m5Zh/mSO4wqurYil27tHrqwRoRjpr4=
-github.com/tklauser/numcpus v0.7.0/go.mod h1:bb6dMVcj8A42tSE7i32fsIUCbQNllK5iDguyOZRUzAY=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
-github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0=
-github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
+github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkEZCJWobwBqMwC0cwCq8/wkkRy/OowZg5OArWZrM=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
-go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
-go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
+go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
+go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
@@ -131,23 +133,19 @@ go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v8
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
-golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ=
-golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc=
+golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
+golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.16.0 h1:QX4fJ0Rr5cPQCF7O9lh9Se4pmwfwskqZfq5moyldzic=
-golang.org/x/mod v0.16.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc=
-golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
+golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
-golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -155,40 +153,41 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
-golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg=
+golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
+golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
-golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
-golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
+golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.19.0 h1:tfGCXNR1OsFG+sVdLAitlpjAvD/I6dHDKnYrpEZUHkw=
-golang.org/x/tools v0.19.0/go.mod h1:qoJWxmGSIBmAeriMx19ogtrEPrGtDbPK634QFIcLAhc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-google.golang.org/genproto v0.0.0-20240123012728-ef4313101c80 h1:KAeGQVN3M9nD0/bQXnr/ClcEMJ968gUXJQ9pwfSynuQ=
-google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80 h1:Lj5rbfG876hIAYFjqiJnPHfhXbv+nzTWfm04Fg/XSVU=
-google.golang.org/genproto/googleapis/api v0.0.0-20240123012728-ef4313101c80/go.mod h1:4jWUdICTdgc3Ibxmr8nAJiiLHwQBY0UI0XZcEMaFKaA=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7 h1:8EeVk1VKMD+GD/neyEHGmz7pFblqPjHoi+PGQIlLx2s=
-google.golang.org/genproto/googleapis/rpc v0.0.0-20240311173647-c811ad7063a7/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
-google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
-google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
-google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
-google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 h1:vlzZttNJGVqTsRFU9AmdnrcO1Znh8Ew9kCD//yjigk0=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117 h1:+rdxYoE3E5htTEWIe15GlN6IfvbURM//Jt0mmkmm6ZU=
+google.golang.org/genproto/googleapis/api v0.0.0-20240604185151-ef581f913117/go.mod h1:OimBR/bc1wPO9iV4NC2bpyjy3VnAwZh5EBPQdtaE5oo=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed h1:J6izYgfBXAI3xTKLgxzTmUltdYaLsuBxFCgDHWJ/eXg=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20240827150818-7e3bb234dfed/go.mod h1:UqMtugtsSgubUsoxbuAoiCXvqvErP7Gf0so0mK9tHxU=
+google.golang.org/grpc v1.66.0 h1:DibZuoBznOxbDQxRINckZcUvnCEvrW9pcWIE2yF9r1c=
+google.golang.org/grpc v1.66.0/go.mod h1:s3/l6xSSCURdVfAnL+TqCNMyTDAGN6+lZeVxnZR128Y=
+google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
+google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
-gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
diff --git a/internal/proto/collections.proto b/internal/proto/collections.proto
new file mode 100644
index 0000000..864ca6a
--- /dev/null
+++ b/internal/proto/collections.proto
@@ -0,0 +1,654 @@
+syntax = "proto3";
+package qdrant;
+
+enum Datatype {
+ Default = 0;
+ Float32 = 1;
+ Uint8 = 2;
+ Float16 = 3;
+}
+
+message VectorParams {
+ uint64 size = 1; // Size of the vectors
+ Distance distance = 2; // Distance function used for comparing vectors
+ optional HnswConfigDiff hnsw_config = 3; // Configuration of vector HNSW graph. If omitted - the collection configuration will be used
+ optional QuantizationConfig quantization_config = 4; // Configuration of vector quantization config. If omitted - the collection configuration will be used
+ optional bool on_disk = 5; // If true - serve vectors from disk. If set to false, the vectors will be loaded in RAM.
+ optional Datatype datatype = 6; // Data type of the vectors
+ optional MultiVectorConfig multivector_config = 7; // Configuration for multi-vector search
+}
+
+message VectorParamsDiff {
+ optional HnswConfigDiff hnsw_config = 1; // Update params for HNSW index. If empty object - it will be unset
+ optional QuantizationConfigDiff quantization_config = 2; // Update quantization params. If none - it is left unchanged.
+ optional bool on_disk = 3; // If true - serve vectors from disk. If set to false, the vectors will be loaded in RAM.
+}
+
+message VectorParamsMap {
+ map<string, VectorParams> map = 1;
+}
+
+message VectorParamsDiffMap {
+ map<string, VectorParamsDiff> map = 1;
+}
+
+message VectorsConfig {
+ oneof config {
+ VectorParams params = 1;
+ VectorParamsMap params_map = 2;
+ }
+}
+
+message VectorsConfigDiff {
+ oneof config {
+ VectorParamsDiff params = 1;
+ VectorParamsDiffMap params_map = 2;
+ }
+}
+
+enum Modifier {
+ None = 0;
+ Idf = 1; // Apply Inverse Document Frequency
+}
+
+message SparseVectorParams {
+ optional SparseIndexConfig index = 1; // Configuration of sparse index
+ optional Modifier modifier = 2; // If set - apply modifier to the vector values
+}
+
+message SparseVectorConfig {
+ map<string, SparseVectorParams> map = 1;
+}
+
+enum MultiVectorComparator {
+ MaxSim = 0;
+}
+
+message MultiVectorConfig {
+ MultiVectorComparator comparator = 1; // Comparator for multi-vector search
+}
+
+
+message GetCollectionInfoRequest {
+ string collection_name = 1; // Name of the collection
+}
+
+message CollectionExistsRequest {
+ string collection_name = 1;
+}
+
+message CollectionExists {
+ bool exists = 1;
+}
+
+message CollectionExistsResponse {
+ CollectionExists result = 1;
+ double time = 2; // Time spent to process
+}
+
+message ListCollectionsRequest {
+}
+
+message CollectionDescription {
+ string name = 1; // Name of the collection
+}
+
+message GetCollectionInfoResponse {
+ CollectionInfo result = 1;
+ double time = 2; // Time spent to process
+}
+
+message ListCollectionsResponse {
+ repeated CollectionDescription collections = 1;
+ double time = 2; // Time spent to process
+}
+
+enum Distance {
+ UnknownDistance = 0;
+ Cosine = 1;
+ Euclid = 2;
+ Dot = 3;
+ Manhattan = 4;
+}
+
+enum CollectionStatus {
+ UnknownCollectionStatus = 0;
+ Green = 1; // All segments are ready
+ Yellow = 2; // Optimization in process
+ Red = 3; // Something went wrong
+ Grey = 4; // Optimization is pending
+}
+
+enum PayloadSchemaType {
+ UnknownType = 0;
+ Keyword = 1;
+ Integer = 2;
+ Float = 3;
+ Geo = 4;
+ Text = 5;
+ Bool = 6;
+ Datetime = 7;
+ Uuid = 8;
+}
+
+enum QuantizationType {
+ UnknownQuantization = 0;
+ Int8 = 1;
+}
+
+enum CompressionRatio {
+ x4 = 0;
+ x8 = 1;
+ x16 = 2;
+ x32 = 3;
+ x64 = 4;
+}
+
+message OptimizerStatus {
+ bool ok = 1;
+ string error = 2;
+}
+
+message HnswConfigDiff {
+ /*
+ Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.
+ */
+ optional uint64 m = 1;
+ /*
+ Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index.
+ */
+ optional uint64 ef_construct = 2;
+ /*
+ Minimal size (in KiloBytes) of vectors for additional payload-based indexing.
+ If the payload chunk is smaller than `full_scan_threshold` additional indexing won't be used -
+ in this case full-scan search should be preferred by query planner and additional indexing is not required.
+ Note: 1 Kb = 1 vector of size 256
+ */
+ optional uint64 full_scan_threshold = 3;
+ /*
+ Number of parallel threads used for background index building.
+ If 0 - automatically select from 8 to 16.
+ Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs.
+ On small CPUs, less threads are used.
+ */
+ optional uint64 max_indexing_threads = 4;
+ /*
+ Store HNSW index on disk. If set to false, the index will be stored in RAM.
+ */
+ optional bool on_disk = 5;
+ /*
+ Number of additional payload-aware links per node in the index graph. If not set - regular M parameter will be used.
+ */
+ optional uint64 payload_m = 6;
+}
+
+message SparseIndexConfig {
+ /*
+ Prefer a full scan search upto (excluding) this number of vectors.
+ Note: this is number of vectors, not KiloBytes.
+ */
+ optional uint64 full_scan_threshold = 1;
+ /*
+ Store inverted index on disk. If set to false, the index will be stored in RAM.
+ */
+ optional bool on_disk = 2;
+ /*
+ Datatype used to store weights in the index.
+ */
+ optional Datatype datatype = 3;
+}
+
+message WalConfigDiff {
+ optional uint64 wal_capacity_mb = 1; // Size of a single WAL block file
+ optional uint64 wal_segments_ahead = 2; // Number of segments to create in advance
+}
+
+message OptimizersConfigDiff {
+ /*
+ The minimal fraction of deleted vectors in a segment, required to perform segment optimization
+ */
+ optional double deleted_threshold = 1;
+ /*
+ The minimal number of vectors in a segment, required to perform segment optimization
+ */
+ optional uint64 vacuum_min_vector_number = 2;
+ /*
+ Target amount of segments the optimizer will try to keep.
+ Real amount of segments may vary depending on multiple parameters:
+
+ - Amount of stored points.
+ - Current write RPS.
+
+ It is recommended to select the default number of segments as a factor of the number of search threads,
+ so that each segment would be handled evenly by one of the threads.
+ */
+ optional uint64 default_segment_number = 3;
+ /*
+ Do not create segments larger this size (in kilobytes).
+ Large segments might require disproportionately long indexation times,
+ therefore it makes sense to limit the size of segments.
+
+ If indexing speed is more important - make this parameter lower.
+ If search speed is more important - make this parameter higher.
+ Note: 1Kb = 1 vector of size 256
+ If not set, will be automatically selected considering the number of available CPUs.
+ */
+ optional uint64 max_segment_size = 4;
+ /*
+ Maximum size (in kilobytes) of vectors to store in-memory per segment.
+ Segments larger than this threshold will be stored as read-only memmaped file.
+
+ Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.
+
+ To disable memmap storage, set this to `0`.
+
+ Note: 1Kb = 1 vector of size 256
+ */
+ optional uint64 memmap_threshold = 5;
+ /*
+ Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing
+
+ Default value is 20,000, based on <https://github.com/google/research/blob/master/scann/docs/algorithms.md>.
+
+ To disable vector indexing, set to `0`.
+
+ Note: 1kB = 1 vector of size 256.
+ */
+ optional uint64 indexing_threshold = 6;
+ /*
+ Interval between forced flushes.
+ */
+ optional uint64 flush_interval_sec = 7;
+ /*
+ Max number of threads (jobs) for running optimizations per shard.
+ Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
+ If null - have no limit and choose dynamically to saturate CPU.
+ If 0 - no optimization threads, optimizations will be disabled.
+ */
+ optional uint64 max_optimization_threads = 8;
+}
+
+message ScalarQuantization {
+ QuantizationType type = 1; // Type of quantization
+ optional float quantile = 2; // Number of bits to use for quantization
+ optional bool always_ram = 3; // If true - quantized vectors always will be stored in RAM, ignoring the config of main storage
+}
+
+message ProductQuantization {
+ CompressionRatio compression = 1; // Compression ratio
+ optional bool always_ram = 2; // If true - quantized vectors always will be stored in RAM, ignoring the config of main storage
+}
+
+message BinaryQuantization {
+ optional bool always_ram = 1; // If true - quantized vectors always will be stored in RAM, ignoring the config of main storage
+}
+
+message QuantizationConfig {
+ oneof quantization {
+ ScalarQuantization scalar = 1;
+ ProductQuantization product = 2;
+ BinaryQuantization binary = 3;
+ }
+}
+
+message Disabled {
+
+}
+
+message QuantizationConfigDiff {
+ oneof quantization {
+ ScalarQuantization scalar = 1;
+ ProductQuantization product = 2;
+ Disabled disabled = 3;
+ BinaryQuantization binary = 4;
+ }
+}
+
+enum ShardingMethod {
+ Auto = 0; // Auto-sharding based on record ids
+ Custom = 1; // Shard by user-defined key
+}
+
+message CreateCollection {
+ string collection_name = 1; // Name of the collection
+ reserved 2; // Deprecated
+ reserved 3; // Deprecated
+ optional HnswConfigDiff hnsw_config = 4; // Configuration of vector index
+ optional WalConfigDiff wal_config = 5; // Configuration of the Write-Ahead-Log
+ optional OptimizersConfigDiff optimizers_config = 6; // Configuration of the optimizers
+ optional uint32 shard_number = 7; // Number of shards in the collection, default is 1 for standalone, otherwise equal to the number of nodes. Minimum is 1
+ optional bool on_disk_payload = 8; // If true - point's payload will not be stored in memory
+ optional uint64 timeout = 9; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+ optional VectorsConfig vectors_config = 10; // Configuration for vectors
+ optional uint32 replication_factor = 11; // Number of replicas of each shard that network tries to maintain, default = 1
+ optional uint32 write_consistency_factor = 12; // How many replicas should apply the operation for us to consider it successful, default = 1
+ optional string init_from_collection = 13; // Specify name of the other collection to copy data from
+ optional QuantizationConfig quantization_config = 14; // Quantization configuration of vector
+ optional ShardingMethod sharding_method = 15; // Sharding method
+ optional SparseVectorConfig sparse_vectors_config = 16; // Configuration for sparse vectors
+}
+
+message UpdateCollection {
+ string collection_name = 1; // Name of the collection
+ optional OptimizersConfigDiff optimizers_config = 2; // New configuration parameters for the collection. This operation is blocking, it will only proceed once all current optimizations are complete
+ optional uint64 timeout = 3; // Wait timeout for operation commit in seconds if blocking, if not specified - default value will be supplied
+ optional CollectionParamsDiff params = 4; // New configuration parameters for the collection
+ optional HnswConfigDiff hnsw_config = 5; // New HNSW parameters for the collection index
+ optional VectorsConfigDiff vectors_config = 6; // New vector parameters
+ optional QuantizationConfigDiff quantization_config = 7; // Quantization configuration of vector
+ optional SparseVectorConfig sparse_vectors_config = 8; // New sparse vector parameters
+}
+
+message DeleteCollection {
+ string collection_name = 1; // Name of the collection
+ optional uint64 timeout = 2; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+}
+
+message CollectionOperationResponse {
+ bool result = 1; // if operation made changes
+ double time = 2; // Time spent to process
+}
+
+message CollectionParams {
+ reserved 1; // Deprecated
+ reserved 2; // Deprecated
+ uint32 shard_number = 3; // Number of shards in collection
+ bool on_disk_payload = 4; // If true - point's payload will not be stored in memory
+ optional VectorsConfig vectors_config = 5; // Configuration for vectors
+ optional uint32 replication_factor = 6; // Number of replicas of each shard that network tries to maintain
+ optional uint32 write_consistency_factor = 7; // How many replicas should apply the operation for us to consider it successful
+ optional uint32 read_fan_out_factor = 8; // Fan-out every read request to these many additional remote nodes (and return first available response)
+ optional ShardingMethod sharding_method = 9; // Sharding method
+ optional SparseVectorConfig sparse_vectors_config = 10; // Configuration for sparse vectors
+}
+
+message CollectionParamsDiff {
+ optional uint32 replication_factor = 1; // Number of replicas of each shard that network tries to maintain
+ optional uint32 write_consistency_factor = 2; // How many replicas should apply the operation for us to consider it successful
+ optional bool on_disk_payload = 3; // If true - point's payload will not be stored in memory
+ optional uint32 read_fan_out_factor = 4; // Fan-out every read request to these many additional remote nodes (and return first available response)
+}
+
+message CollectionConfig {
+ CollectionParams params = 1; // Collection parameters
+ HnswConfigDiff hnsw_config = 2; // Configuration of vector index
+ OptimizersConfigDiff optimizer_config = 3; // Configuration of the optimizers
+ WalConfigDiff wal_config = 4; // Configuration of the Write-Ahead-Log
+ optional QuantizationConfig quantization_config = 5; // Configuration of the vector quantization
+}
+
+enum TokenizerType {
+ Unknown = 0;
+ Prefix = 1;
+ Whitespace = 2;
+ Word = 3;
+ Multilingual = 4;
+}
+
+message KeywordIndexParams {
+ optional bool is_tenant = 1; // If true - used for tenant optimization.
+ optional bool on_disk = 2; // If true - store index on disk.
+}
+
+message IntegerIndexParams {
+ bool lookup = 1; // If true - support direct lookups.
+ bool range = 2; // If true - support ranges filters.
+ optional bool is_principal = 3; // If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests.
+ optional bool on_disk = 4; // If true - store index on disk.
+}
+
+message FloatIndexParams {
+ optional bool on_disk = 1; // If true - store index on disk.
+ optional bool is_principal = 2; // If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests.
+}
+
+message GeoIndexParams {
+}
+
+message TextIndexParams {
+ TokenizerType tokenizer = 1; // Tokenizer type
+ optional bool lowercase = 2; // If true - all tokens will be lowercase
+ optional uint64 min_token_len = 3; // Minimal token length
+ optional uint64 max_token_len = 4; // Maximal token length
+}
+
+message BoolIndexParams {
+}
+
+message DatetimeIndexParams {
+ optional bool on_disk = 1; // If true - store index on disk.
+ optional bool is_principal = 2; // If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests.
+}
+
+message UuidIndexParams {
+ optional bool is_tenant = 1; // If true - used for tenant optimization.
+ optional bool on_disk = 2; // If true - store index on disk.
+}
+
+message PayloadIndexParams {
+ oneof index_params {
+ KeywordIndexParams keyword_index_params = 3; // Parameters for keyword index
+ IntegerIndexParams integer_index_params = 2; // Parameters for integer index
+ FloatIndexParams float_index_params = 4; // Parameters for float index
+ GeoIndexParams geo_index_params = 5; // Parameters for geo index
+ TextIndexParams text_index_params = 1; // Parameters for text index
+ BoolIndexParams bool_index_params = 6; // Parameters for bool index
+ DatetimeIndexParams datetime_index_params = 7; // Parameters for datetime index
+ UuidIndexParams uuid_index_params = 8; // Parameters for uuid index
+ }
+}
+
+message PayloadSchemaInfo {
+ PayloadSchemaType data_type = 1; // Field data type
+ optional PayloadIndexParams params = 2; // Field index parameters
+ optional uint64 points = 3; // Number of points indexed within this field indexed
+}
+
+message CollectionInfo {
+ CollectionStatus status = 1; // operating condition of the collection
+ OptimizerStatus optimizer_status = 2; // status of collection optimizers
+ optional uint64 vectors_count = 3; // Approximate number of vectors in the collection
+ uint64 segments_count = 4; // Number of independent segments
+ reserved 5; // Deprecated
+ reserved 6; // Deprecated
+ CollectionConfig config = 7; // Configuration
+ map<string, PayloadSchemaInfo> payload_schema = 8; // Collection data types
+ optional uint64 points_count = 9; // Approximate number of points in the collection
+ optional uint64 indexed_vectors_count = 10; // Approximate number of indexed vectors in the collection.
+}
+
+message ChangeAliases {
+ repeated AliasOperations actions = 1; // List of actions
+ optional uint64 timeout = 2; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+}
+
+message AliasOperations {
+ oneof action {
+ CreateAlias create_alias = 1;
+ RenameAlias rename_alias = 2;
+ DeleteAlias delete_alias = 3;
+ }
+}
+
+message CreateAlias {
+ string collection_name = 1; // Name of the collection
+ string alias_name = 2; // New name of the alias
+}
+
+message RenameAlias {
+ string old_alias_name = 1; // Name of the alias to rename
+ string new_alias_name = 2; // Name of the alias
+}
+
+message DeleteAlias {
+ string alias_name = 1; // Name of the alias
+}
+
+message ListAliasesRequest {
+}
+
+message ListCollectionAliasesRequest {
+ string collection_name = 1; // Name of the collection
+}
+
+message AliasDescription {
+ string alias_name = 1; // Name of the alias
+ string collection_name = 2; // Name of the collection
+}
+
+message ListAliasesResponse {
+ repeated AliasDescription aliases = 1;
+ double time = 2; // Time spent to process
+}
+
+message CollectionClusterInfoRequest {
+ string collection_name = 1; // Name of the collection
+}
+
+enum ReplicaState {
+ Active = 0; // Active and sound
+ Dead = 1; // Failed for some reason
+ Partial = 2; // The shard is partially loaded and is currently receiving data from other shards
+ Initializing = 3; // Collection is being created
+ Listener = 4; // A shard which receives data, but is not used for search; Useful for backup shards
+ PartialSnapshot = 5; // Deprecated: snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard
+ Recovery = 6; // Shard is undergoing recovered by an external node; Normally rejects updates, accepts updates if force is true
+ Resharding = 7; // Points are being migrated to this shard as part of resharding
+}
+
+message ShardKey {
+ oneof key {
+ string keyword = 1; // String key
+ uint64 number = 2; // Number key
+ }
+}
+
+message LocalShardInfo {
+ uint32 shard_id = 1; // Local shard id
+ uint64 points_count = 2; // Number of points in the shard
+ ReplicaState state = 3; // Is replica active
+ optional ShardKey shard_key = 4; // User-defined shard key
+}
+
+message RemoteShardInfo {
+ uint32 shard_id = 1; // Local shard id
+ uint64 peer_id = 2; // Remote peer id
+ ReplicaState state = 3; // Is replica active
+ optional ShardKey shard_key = 4; // User-defined shard key
+}
+
+message ShardTransferInfo {
+ uint32 shard_id = 1; // Local shard id
+ optional uint32 to_shard_id = 5;
+ uint64 from = 2;
+ uint64 to = 3;
+ bool sync = 4; // If `true` transfer is a synchronization of a replicas; If `false` transfer is a moving of a shard from one peer to another
+}
+
+message ReshardingInfo {
+ uint32 shard_id = 1;
+ uint64 peer_id = 2;
+ optional ShardKey shard_key = 3;
+}
+
+message CollectionClusterInfoResponse {
+ uint64 peer_id = 1; // ID of this peer
+ uint64 shard_count = 2; // Total number of shards
+ repeated LocalShardInfo local_shards = 3; // Local shards
+ repeated RemoteShardInfo remote_shards = 4; // Remote shards
+ repeated ShardTransferInfo shard_transfers = 5; // Shard transfers
+ // TODO(resharding): enable on release:
+ // repeated ReshardingInfo resharding_operations = 6; // Resharding operations
+}
+
+message MoveShard {
+ uint32 shard_id = 1; // Local shard id
+ optional uint32 to_shard_id = 5;
+ uint64 from_peer_id = 2;
+ uint64 to_peer_id = 3;
+ optional ShardTransferMethod method = 4;
+}
+
+message ReplicateShard {
+ uint32 shard_id = 1; // Local shard id
+ optional uint32 to_shard_id = 5;
+ uint64 from_peer_id = 2;
+ uint64 to_peer_id = 3;
+ optional ShardTransferMethod method = 4;
+}
+
+message AbortShardTransfer {
+ uint32 shard_id = 1; // Local shard id
+ optional uint32 to_shard_id = 4;
+ uint64 from_peer_id = 2;
+ uint64 to_peer_id = 3;
+}
+
+message RestartTransfer {
+ uint32 shard_id = 1; // Local shard id
+ optional uint32 to_shard_id = 5;
+ uint64 from_peer_id = 2;
+ uint64 to_peer_id = 3;
+ ShardTransferMethod method = 4;
+}
+
+enum ShardTransferMethod {
+ StreamRecords = 0; // Stream shard records in batches
+ Snapshot = 1; // Snapshot the shard and recover it on the target peer
+ WalDelta = 2; // Resolve WAL delta between peers and transfer the difference
+ ReshardingStreamRecords = 3; // Stream shard records in batches for resharding
+}
+
+message Replica {
+ uint32 shard_id = 1;
+ uint64 peer_id = 2;
+}
+
+message CreateShardKey {
+ ShardKey shard_key = 1; // User-defined shard key
+ optional uint32 shards_number = 2; // Number of shards to create per shard key
+ optional uint32 replication_factor = 3; // Number of replicas of each shard to create
+ repeated uint64 placement = 4; // List of peer ids, allowed to create shards. If empty - all peers are allowed
+}
+
+message DeleteShardKey {
+ ShardKey shard_key = 1; // Shard key to delete
+}
+
+message UpdateCollectionClusterSetupRequest {
+ string collection_name = 1; // Name of the collection
+ oneof operation {
+ MoveShard move_shard = 2;
+ ReplicateShard replicate_shard = 3;
+ AbortShardTransfer abort_transfer = 4;
+ Replica drop_replica = 5;
+ CreateShardKey create_shard_key = 7;
+ DeleteShardKey delete_shard_key = 8;
+ RestartTransfer restart_transfer = 9;
+ }
+ optional uint64 timeout = 6; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+}
+
+message UpdateCollectionClusterSetupResponse {
+ bool result = 1;
+}
+
+message CreateShardKeyRequest {
+ string collection_name = 1; // Name of the collection
+ CreateShardKey request = 2; // Request to create shard key
+ optional uint64 timeout = 3; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+}
+
+message DeleteShardKeyRequest {
+ string collection_name = 1; // Name of the collection
+ DeleteShardKey request = 2; // Request to delete shard key
+ optional uint64 timeout = 3; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
+}
+
+message CreateShardKeyResponse {
+ bool result = 1;
+}
+
+message DeleteShardKeyResponse {
+ bool result = 1;
+}
diff --git a/internal/proto/collections_service.proto b/internal/proto/collections_service.proto
new file mode 100644
index 0000000..0a77844
--- /dev/null
+++ b/internal/proto/collections_service.proto
@@ -0,0 +1,60 @@
+syntax = "proto3";
+
+import "collections.proto";
+
+package qdrant;
+
+service Collections {
+ /*
+ Get detailed information about specified existing collection
+ */
+ rpc Get (GetCollectionInfoRequest) returns (GetCollectionInfoResponse) {}
+ /*
+ Get list name of all existing collections
+ */
+ rpc List (ListCollectionsRequest) returns (ListCollectionsResponse) {}
+ /*
+ Create new collection with given parameters
+ */
+ rpc Create (CreateCollection) returns (CollectionOperationResponse) {}
+ /*
+ Update parameters of the existing collection
+ */
+ rpc Update (UpdateCollection) returns (CollectionOperationResponse) {}
+ /*
+ Drop collection and all associated data
+ */
+ rpc Delete (DeleteCollection) returns (CollectionOperationResponse) {}
+ /*
+ Update Aliases of the existing collection
+ */
+ rpc UpdateAliases (ChangeAliases) returns (CollectionOperationResponse) {}
+ /*
+ Get list of all aliases for a collection
+ */
+ rpc ListCollectionAliases (ListCollectionAliasesRequest) returns (ListAliasesResponse) {}
+ /*
+ Get list of all aliases for all existing collections
+ */
+ rpc ListAliases (ListAliasesRequest) returns (ListAliasesResponse) {}
+ /*
+ Get cluster information for a collection
+ */
+ rpc CollectionClusterInfo (CollectionClusterInfoRequest) returns (CollectionClusterInfoResponse) {}
+ /*
+ Check the existence of a collection
+ */
+ rpc CollectionExists (CollectionExistsRequest) returns (CollectionExistsResponse) {}
+ /*
+ Update cluster setup for a collection
+ */
+ rpc UpdateCollectionClusterSetup (UpdateCollectionClusterSetupRequest) returns (UpdateCollectionClusterSetupResponse) {}
+ /*
+ Create shard key
+ */
+ rpc CreateShardKey (CreateShardKeyRequest) returns (CreateShardKeyResponse) {}
+ /*
+ Delete shard key
+ */
+ rpc DeleteShardKey (DeleteShardKeyRequest) returns (DeleteShardKeyResponse) {}
+}
diff --git a/internal/proto/json_with_int.proto b/internal/proto/json_with_int.proto
new file mode 100644
index 0000000..3fc496e
--- /dev/null
+++ b/internal/proto/json_with_int.proto
@@ -0,0 +1,61 @@
+// Fork of the google.protobuf.Value with explicit support for integer values
+
+syntax = "proto3";
+
+package qdrant;
+
+// `Struct` represents a structured data value, consisting of fields
+// which map to dynamically typed values. In some languages, `Struct`
+// might be supported by a native representation. For example, in
+// scripting languages like JS a struct is represented as an
+// object. The details of that representation are described together
+// with the proto support for the language.
+//
+// The JSON representation for `Struct` is a JSON object.
+message Struct {
+ // Unordered map of dynamically typed values.
+  map<string, Value> fields = 1;
+}
+
+// `Value` represents a dynamically typed value which can be either
+// null, a number, a string, a boolean, a recursive struct value, or a
+// list of values. A producer of value is expected to set one of those
+// variants, absence of any variant indicates an error.
+//
+// The JSON representation for `Value` is a JSON value.
+message Value {
+ // The kind of value.
+ oneof kind {
+ // Represents a null value.
+ NullValue null_value = 1;
+ // Represents a double value.
+ double double_value = 2;
+ // Represents an integer value
+ int64 integer_value = 3;
+ // Represents a string value.
+ string string_value = 4;
+ // Represents a boolean value.
+ bool bool_value = 5;
+ // Represents a structured value.
+ Struct struct_value = 6;
+ // Represents a repeated `Value`.
+ ListValue list_value = 7;
+ }
+}
+
+// `NullValue` is a singleton enumeration to represent the null value for the
+// `Value` type union.
+//
+// The JSON representation for `NullValue` is JSON `null`.
+enum NullValue {
+ // Null value.
+ NULL_VALUE = 0;
+}
+
+// `ListValue` is a wrapper around a repeated field of values.
+//
+// The JSON representation for `ListValue` is a JSON array.
+message ListValue {
+ // Repeated field of dynamically typed values.
+ repeated Value values = 1;
+}
diff --git a/internal/proto/points.proto b/internal/proto/points.proto
new file mode 100644
index 0000000..a34047e
--- /dev/null
+++ b/internal/proto/points.proto
@@ -0,0 +1,965 @@
+syntax = "proto3";
+
+package qdrant;
+
+import "collections.proto";
+import "google/protobuf/timestamp.proto";
+import "json_with_int.proto";
+
+
+enum WriteOrderingType {
+ Weak = 0; // Write operations may be reordered, works faster, default
+ Medium = 1; // Write operations go through dynamically selected leader, may be inconsistent for a short period of time in case of leader change
+ Strong = 2; // Write operations go through the permanent leader, consistent, but may be unavailable if leader is down
+}
+
+message WriteOrdering {
+ WriteOrderingType type = 1; // Write ordering guarantees
+}
+
+enum ReadConsistencyType {
+ All = 0; // Send request to all nodes and return points which are present on all of them
+ Majority = 1; // Send requests to all nodes and return points which are present on majority of them
+ Quorum = 2; // Send requests to half + 1 nodes, return points which are present on all of them
+}
+
+message ReadConsistency {
+ oneof value {
+ ReadConsistencyType type = 1; // Common read consistency configurations
+ uint64 factor = 2; // Send request to a specified number of nodes, and return points which are present on all of them
+ }
+}
+
+// ---------------------------------------------
+// ------------- Point Id Requests -------------
+// ---------------------------------------------
+
+message PointId {
+ oneof point_id_options {
+ uint64 num = 1; // Numerical ID of the point
+ string uuid = 2; // UUID
+ }
+}
+
+message SparseIndices {
+ repeated uint32 data = 1;
+}
+
+// Legacy vector format, which determines the vector type by the configuration of its fields.
+message Vector {
+ repeated float data = 1; // Vector data (flatten for multi vectors)
+ optional SparseIndices indices = 2; // Sparse indices for sparse vectors
+ optional uint32 vectors_count = 3; // Number of vectors per multi vector
+}
+
+message DenseVector {
+ repeated float data = 1;
+}
+
+message SparseVector {
+ repeated float values = 1;
+ repeated uint32 indices = 2;
+}
+
+message MultiDenseVector {
+ repeated DenseVector vectors = 1;
+}
+
+// Vector type to be used in queries. Ids will be substituted with their corresponding vectors from the collection.
+message VectorInput {
+ oneof variant {
+ PointId id = 1;
+ DenseVector dense = 2;
+ SparseVector sparse = 3;
+ MultiDenseVector multi_dense = 4;
+ }
+}
+
+// ---------------------------------------------
+// ----------------- ShardKeySelector ----------
+// ---------------------------------------------
+
+message ShardKeySelector {
+ repeated ShardKey shard_keys = 1; // List of shard keys which should be used in the request
+}
+
+
+// ---------------------------------------------
+// ---------------- RPC Requests ---------------
+// ---------------------------------------------
+
+message UpsertPoints {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+ repeated PointStruct points = 3;
+ optional WriteOrdering ordering = 4; // Write ordering guarantees
+ optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys
+}
+
+message DeletePoints {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+ PointsSelector points = 3; // Affected points
+ optional WriteOrdering ordering = 4; // Write ordering guarantees
+ optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys
+}
+
+message GetPoints {
+ string collection_name = 1; // name of the collection
+ repeated PointId ids = 2; // List of points to retrieve
+ reserved 3; // deprecated "with_vector" field
+ WithPayloadSelector with_payload = 4; // Options for specifying which payload to include or not
+ optional WithVectorsSelector with_vectors = 5; // Options for specifying which vectors to include into response
+ optional ReadConsistency read_consistency = 6; // Options for specifying read consistency guarantees
+ optional ShardKeySelector shard_key_selector = 7; // Specify in which shards to look for the points, if not specified - look in all shards
+ optional uint64 timeout = 8; // If set, overrides global timeout setting for this request. Unit is seconds.
+}
+
+message UpdatePointVectors {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+ repeated PointVectors points = 3; // List of points and vectors to update
+ optional WriteOrdering ordering = 4; // Write ordering guarantees
+ optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys
+}
+
+message PointVectors {
+ PointId id = 1; // ID to update vectors for
+ Vectors vectors = 2; // Named vectors to update, leave others intact
+}
+
+message DeletePointVectors {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+ PointsSelector points_selector = 3; // Affected points
+ VectorsSelector vectors = 4; // List of vector names to delete
+ optional WriteOrdering ordering = 5; // Write ordering guarantees
+ optional ShardKeySelector shard_key_selector = 6; // Option for custom sharding to specify used shard keys
+}
+
+message SetPayloadPoints {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+  map<string, Value> payload = 3; // New payload values
+ reserved 4; // List of point to modify, deprecated
+ optional PointsSelector points_selector = 5; // Affected points
+ optional WriteOrdering ordering = 6; // Write ordering guarantees
+ optional ShardKeySelector shard_key_selector = 7; // Option for custom sharding to specify used shard keys
+ optional string key = 8; // Option for indicate property of payload
+}
+
+message DeletePayloadPoints {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+ repeated string keys = 3; // List of keys to delete
+ reserved 4; // Affected points, deprecated
+ optional PointsSelector points_selector = 5; // Affected points
+ optional WriteOrdering ordering = 6; // Write ordering guarantees
+ optional ShardKeySelector shard_key_selector = 7; // Option for custom sharding to specify used shard keys
+}
+
+message ClearPayloadPoints {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+ PointsSelector points = 3; // Affected points
+ optional WriteOrdering ordering = 4; // Write ordering guarantees
+ optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys
+}
+
+enum FieldType {
+ FieldTypeKeyword = 0;
+ FieldTypeInteger = 1;
+ FieldTypeFloat = 2;
+ FieldTypeGeo = 3;
+ FieldTypeText = 4;
+ FieldTypeBool = 5;
+ FieldTypeDatetime = 6;
+ FieldTypeUuid = 7;
+}
+
+message CreateFieldIndexCollection {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+ string field_name = 3; // Field name to index
+ optional FieldType field_type = 4; // Field type.
+ optional PayloadIndexParams field_index_params = 5; // Payload index params.
+ optional WriteOrdering ordering = 6; // Write ordering guarantees
+}
+
+message DeleteFieldIndexCollection {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+ string field_name = 3; // Field name to delete
+ optional WriteOrdering ordering = 4; // Write ordering guarantees
+}
+
+message PayloadIncludeSelector {
+ repeated string fields = 1; // List of payload keys to include into result
+}
+
+message PayloadExcludeSelector {
+ repeated string fields = 1; // List of payload keys to exclude from the result
+}
+
+message WithPayloadSelector {
+ oneof selector_options {
+ bool enable = 1; // If `true` - return all payload, if `false` - none
+ PayloadIncludeSelector include = 2;
+ PayloadExcludeSelector exclude = 3;
+ }
+}
+
+message NamedVectors {
+  map<string, Vector> vectors = 1;
+}
+
+message Vectors {
+ oneof vectors_options {
+ Vector vector = 1;
+ NamedVectors vectors = 2;
+ }
+}
+
+message VectorsSelector {
+ repeated string names = 1; // List of vectors to include into result
+}
+
+message WithVectorsSelector {
+ oneof selector_options {
+ bool enable = 1; // If `true` - return all vectors, if `false` - none
+ VectorsSelector include = 2; // List of payload keys to include into result
+ }
+}
+
+message QuantizationSearchParams {
+ /*
+ If set to true, search will ignore quantized vector data
+ */
+ optional bool ignore = 1;
+
+ /*
+ If true, use original vectors to re-score top-k results. If ignored, qdrant decides automatically does rescore enabled or not.
+ */
+ optional bool rescore = 2;
+
+ /*
+ Oversampling factor for quantization.
+
+ Defines how many extra vectors should be pre-selected using quantized index,
+ and then re-scored using original vectors.
+
+ For example, if `oversampling` is 2.4 and `limit` is 100, then 240 vectors will be pre-selected using quantized index,
+ and then top-100 will be returned after re-scoring.
+ */
+ optional double oversampling = 3;
+}
+
+message SearchParams {
+ /*
+ Params relevant to HNSW index. Size of the beam in a beam-search.
+ Larger the value - more accurate the result, more time required for search.
+ */
+ optional uint64 hnsw_ef = 1;
+
+ /*
+ Search without approximation. If set to true, search may run long but with exact results.
+ */
+ optional bool exact = 2;
+
+ /*
+ If set to true, search will ignore quantized vector data
+ */
+ optional QuantizationSearchParams quantization = 3;
+ /*
+ If enabled, the engine will only perform search among indexed or small segments.
+ Using this option prevents slow searches in case of delayed index, but does not
+ guarantee that all uploaded vectors will be included in search results
+ */
+ optional bool indexed_only = 4;
+}
+
+message SearchPoints {
+ string collection_name = 1; // name of the collection
+ repeated float vector = 2; // vector
+ Filter filter = 3; // Filter conditions - return only those points that satisfy the specified conditions
+ uint64 limit = 4; // Max number of result
+ reserved 5; // deprecated "with_vector" field
+ WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not
+ SearchParams params = 7; // Search config
+ optional float score_threshold = 8; // If provided - cut off results with worse scores
+ optional uint64 offset = 9; // Offset of the result
+ optional string vector_name = 10; // Which vector to use for search, if not specified - use default vector
+ optional WithVectorsSelector with_vectors = 11; // Options for specifying which vectors to include into response
+ optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees
+ optional uint64 timeout = 13; // If set, overrides global timeout setting for this request. Unit is seconds.
+ optional ShardKeySelector shard_key_selector = 14; // Specify in which shards to look for the points, if not specified - look in all shards
+ optional SparseIndices sparse_indices = 15;
+}
+
+message SearchBatchPoints {
+ string collection_name = 1; // Name of the collection
+ repeated SearchPoints search_points = 2;
+ optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees
+ optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds.
+}
+
+message WithLookup {
+ string collection = 1; // Name of the collection to use for points lookup
+ optional WithPayloadSelector with_payload = 2; // Options for specifying which payload to include (or not)
+ optional WithVectorsSelector with_vectors = 3; // Options for specifying which vectors to include (or not)
+}
+
+
+message SearchPointGroups {
+ string collection_name = 1; // Name of the collection
+ repeated float vector = 2; // Vector to compare against
+ Filter filter = 3; // Filter conditions - return only those points that satisfy the specified conditions
+ uint32 limit = 4; // Max number of result
+ WithPayloadSelector with_payload = 5; // Options for specifying which payload to include or not
+ SearchParams params = 6; // Search config
+ optional float score_threshold = 7; // If provided - cut off results with worse scores
+ optional string vector_name = 8; // Which vector to use for search, if not specified - use default vector
+ optional WithVectorsSelector with_vectors = 9; // Options for specifying which vectors to include into response
+ string group_by = 10; // Payload field to group by, must be a string or number field. If there are multiple values for the field, all of them will be used. One point can be in multiple groups.
+ uint32 group_size = 11; // Maximum amount of points to return per group
+ optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees
+ optional WithLookup with_lookup = 13; // Options for specifying how to use the group id to lookup points in another collection
+ optional uint64 timeout = 14; // If set, overrides global timeout setting for this request. Unit is seconds.
+ optional ShardKeySelector shard_key_selector = 15; // Specify in which shards to look for the points, if not specified - look in all shards
+ optional SparseIndices sparse_indices = 16;
+}
+
+enum Direction {
+ Asc = 0;
+ Desc = 1;
+}
+
+message StartFrom {
+ oneof value {
+ double float = 1;
+ int64 integer = 2;
+ google.protobuf.Timestamp timestamp = 3;
+ string datetime = 4;
+ }
+}
+
+message OrderBy {
+ string key = 1; // Payload key to order by
+ optional Direction direction = 2; // Ascending or descending order
+ optional StartFrom start_from = 3; // Start from this value
+}
+
+message ScrollPoints {
+ string collection_name = 1;
+ Filter filter = 2; // Filter conditions - return only those points that satisfy the specified conditions
+ optional PointId offset = 3; // Start with this ID
+ optional uint32 limit = 4; // Max number of result
+ reserved 5; // deprecated "with_vector" field
+ WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not
+ optional WithVectorsSelector with_vectors = 7; // Options for specifying which vectors to include into response
+ optional ReadConsistency read_consistency = 8; // Options for specifying read consistency guarantees
+ optional ShardKeySelector shard_key_selector = 9; // Specify in which shards to look for the points, if not specified - look in all shards
+ optional OrderBy order_by = 10; // Order the records by a payload field
+ optional uint64 timeout = 11; // If set, overrides global timeout setting for this request. Unit is seconds.
+}
+
+// How to use positive and negative vectors to find the results, default is `AverageVector`.
+enum RecommendStrategy {
+ // Average positive and negative vectors and create a single query with the formula
+ // `query = avg_pos + avg_pos - avg_neg`. Then performs normal search.
+ AverageVector = 0;
+
+ // Uses custom search objective. Each candidate is compared against all
+ // examples, its score is then chosen from the `max(max_pos_score, max_neg_score)`.
+ // If the `max_neg_score` is chosen then it is squared and negated.
+ BestScore = 1;
+}
+
+message LookupLocation {
+ string collection_name = 1;
+ optional string vector_name = 2; // Which vector to use for search, if not specified - use default vector
+ optional ShardKeySelector shard_key_selector = 3; // Specify in which shards to look for the points, if not specified - look in all shards
+}
+
+message RecommendPoints {
+ string collection_name = 1; // name of the collection
+ repeated PointId positive = 2; // Look for vectors closest to the vectors from these points
+ repeated PointId negative = 3; // Try to avoid vectors like the vector from these points
+ Filter filter = 4; // Filter conditions - return only those points that satisfy the specified conditions
+ uint64 limit = 5; // Max number of result
+ reserved 6; // deprecated "with_vector" field
+ WithPayloadSelector with_payload = 7; // Options for specifying which payload to include or not
+ SearchParams params = 8; // Search config
+ optional float score_threshold = 9; // If provided - cut off results with worse scores
+ optional uint64 offset = 10; // Offset of the result
+ optional string using = 11; // Define which vector to use for recommendation, if not specified - default vector
+ optional WithVectorsSelector with_vectors = 12; // Options for specifying which vectors to include into response
+ optional LookupLocation lookup_from = 13; // Name of the collection to use for points lookup, if not specified - use current collection
+ optional ReadConsistency read_consistency = 14; // Options for specifying read consistency guarantees
+ optional RecommendStrategy strategy = 16; // How to use the example vectors to find the results
+ repeated Vector positive_vectors = 17; // Look for vectors closest to those
+ repeated Vector negative_vectors = 18; // Try to avoid vectors like this
+ optional uint64 timeout = 19; // If set, overrides global timeout setting for this request. Unit is seconds.
+ optional ShardKeySelector shard_key_selector = 20; // Specify in which shards to look for the points, if not specified - look in all shards
+}
+
+message RecommendBatchPoints {
+ string collection_name = 1; // Name of the collection
+ repeated RecommendPoints recommend_points = 2;
+ optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees
+ optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds.
+}
+
+message RecommendPointGroups {
+ string collection_name = 1; // Name of the collection
+ repeated PointId positive = 2; // Look for vectors closest to the vectors from these points
+ repeated PointId negative = 3; // Try to avoid vectors like the vector from these points
+ Filter filter = 4; // Filter conditions - return only those points that satisfy the specified conditions
+ uint32 limit = 5; // Max number of groups in result
+ WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not
+ SearchParams params = 7; // Search config
+ optional float score_threshold = 8; // If provided - cut off results with worse scores
+ optional string using = 9; // Define which vector to use for recommendation, if not specified - default vector
+ optional WithVectorsSelector with_vectors = 10; // Options for specifying which vectors to include into response
+ optional LookupLocation lookup_from = 11; // Name of the collection to use for points lookup, if not specified - use current collection
+ string group_by = 12; // Payload field to group by, must be a string or number field. If there are multiple values for the field, all of them will be used. One point can be in multiple groups.
+ uint32 group_size = 13; // Maximum amount of points to return per group
+ optional ReadConsistency read_consistency = 14; // Options for specifying read consistency guarantees
+ optional WithLookup with_lookup = 15; // Options for specifying how to use the group id to lookup points in another collection
+ optional RecommendStrategy strategy = 17; // How to use the example vectors to find the results
+ repeated Vector positive_vectors = 18; // Look for vectors closest to those
+ repeated Vector negative_vectors = 19; // Try to avoid vectors like this
+ optional uint64 timeout = 20; // If set, overrides global timeout setting for this request. Unit is seconds.
+ optional ShardKeySelector shard_key_selector = 21; // Specify in which shards to look for the points, if not specified - look in all shards
+}
+
+message TargetVector {
+ oneof target {
+ VectorExample single = 1;
+
+ // leaving extensibility for possibly adding multi-target
+ }
+}
+
+message VectorExample {
+ oneof example {
+ PointId id = 1;
+ Vector vector = 2;
+ }
+}
+
+message ContextExamplePair {
+ VectorExample positive = 1;
+ VectorExample negative = 2;
+}
+
+message DiscoverPoints {
+ string collection_name = 1; // name of the collection
+ TargetVector target = 2; // Use this as the primary search objective
+ repeated ContextExamplePair context = 3; // Search will be constrained by these pairs of examples
+ Filter filter = 4; // Filter conditions - return only those points that satisfy the specified conditions
+ uint64 limit = 5; // Max number of result
+ WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not
+ SearchParams params = 7; // Search config
+ optional uint64 offset = 8; // Offset of the result
+ optional string using = 9; // Define which vector to use for recommendation, if not specified - default vector
+ optional WithVectorsSelector with_vectors = 10; // Options for specifying which vectors to include into response
+ optional LookupLocation lookup_from = 11; // Name of the collection to use for points lookup, if not specified - use current collection
+ optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees
+ optional uint64 timeout = 13; // If set, overrides global timeout setting for this request. Unit is seconds.
+ optional ShardKeySelector shard_key_selector = 14; // Specify in which shards to look for the points, if not specified - look in all shards
+}
+
+message DiscoverBatchPoints {
+ string collection_name = 1; // Name of the collection
+ repeated DiscoverPoints discover_points = 2;
+ optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees
+ optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds.
+}
+
+message CountPoints {
+ string collection_name = 1; // Name of the collection
+ Filter filter = 2; // Filter conditions - return only those points that satisfy the specified conditions
+ optional bool exact = 3; // If `true` - return exact count, if `false` - return approximate count
+ optional ReadConsistency read_consistency = 4; // Options for specifying read consistency guarantees
+ optional ShardKeySelector shard_key_selector = 5; // Specify in which shards to look for the points, if not specified - look in all shards
+ optional uint64 timeout = 6; // If set, overrides global timeout setting for this request. Unit is seconds.
+}
+
+message RecommendInput {
+ repeated VectorInput positive = 1; // Look for vectors closest to the vectors from these points
+ repeated VectorInput negative = 2; // Try to avoid vectors like the vector from these points
+ optional RecommendStrategy strategy = 3; // How to use the provided vectors to find the results
+}
+
+message ContextInputPair {
+ VectorInput positive = 1; // A positive vector
+ VectorInput negative = 2; // Repel from this vector
+}
+
+message DiscoverInput {
+ VectorInput target = 1; // Use this as the primary search objective
+ ContextInput context = 2; // Search space will be constrained by these pairs of vectors
+}
+
+message ContextInput {
+ repeated ContextInputPair pairs = 1; // Search space will be constrained by these pairs of vectors
+}
+
+enum Fusion {
+ RRF = 0; // Reciprocal Rank Fusion
+ DBSF = 1; // Distribution-Based Score Fusion
+}
+
+// Sample points from the collection
+//
+// Available sampling methods:
+//
+// * `random` - Random sampling
+enum Sample {
+ Random = 0;
+}
+
+message Query {
+ oneof variant {
+ VectorInput nearest = 1; // Find the nearest neighbors to this vector.
+ RecommendInput recommend = 2; // Use multiple positive and negative vectors to find the results.
+ DiscoverInput discover = 3; // Search for nearest points, but constrain the search space with context
+ ContextInput context = 4; // Return points that live in positive areas.
+ OrderBy order_by = 5; // Order the points by a payload field.
+ Fusion fusion = 6; // Fuse the results of multiple prefetches.
+ Sample sample = 7; // Sample points from the collection.
+ }
+}
+
+message PrefetchQuery {
+ repeated PrefetchQuery prefetch = 1; // Sub-requests to perform first. If present, the query will be performed on the results of the prefetches.
+ optional Query query = 2; // Query to perform. If missing, returns points ordered by their IDs.
+ optional string using = 3; // Define which vector to use for querying. If missing, the default vector is is used.
+ optional Filter filter = 4; // Filter conditions - return only those points that satisfy the specified conditions.
+ optional SearchParams params = 5; // Search params for when there is no prefetch.
+ optional float score_threshold = 6; // Return points with scores better than this threshold.
+ optional uint64 limit = 7; // Max number of points. Default is 10
+ optional LookupLocation lookup_from = 8; // The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector
+}
+
+message QueryPoints {
+ string collection_name = 1; // Name of the collection
+ repeated PrefetchQuery prefetch = 2; // Sub-requests to perform first. If present, the query will be performed on the results of the prefetches.
+ optional Query query = 3; // Query to perform. If missing, returns points ordered by their IDs.
+ optional string using = 4; // Define which vector to use for querying. If missing, the default vector is used.
+ optional Filter filter = 5; // Filter conditions - return only those points that satisfy the specified conditions.
+ optional SearchParams params = 6; // Search params for when there is no prefetch.
+ optional float score_threshold = 7; // Return points with scores better than this threshold.
+ optional uint64 limit = 8; // Max number of points. Default is 10.
+ optional uint64 offset = 9; // Offset of the result. Skip this many points. Default is 0.
+ optional WithVectorsSelector with_vectors = 10; // Options for specifying which vectors to include into the response.
+ optional WithPayloadSelector with_payload = 11; // Options for specifying which payload to include or not.
+ optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees.
+ optional ShardKeySelector shard_key_selector = 13; // Specify in which shards to look for the points, if not specified - look in all shards.
+ optional LookupLocation lookup_from = 14; // The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector
+ optional uint64 timeout = 15; // If set, overrides global timeout setting for this request. Unit is seconds.
+}
+
+message QueryBatchPoints {
+ string collection_name = 1;
+ repeated QueryPoints query_points = 2;
+ optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees
+ optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds.
+}
+
+message QueryPointGroups {
+ string collection_name = 1; // Name of the collection
+ repeated PrefetchQuery prefetch = 2; // Sub-requests to perform first. If present, the query will be performed on the results of the prefetches.
+ optional Query query = 3; // Query to perform. If missing, returns points ordered by their IDs.
+ optional string using = 4; // Define which vector to use for querying. If missing, the default vector is used.
+ optional Filter filter = 5; // Filter conditions - return only those points that satisfy the specified conditions.
+ optional SearchParams params = 6; // Search params for when there is no prefetch.
+ optional float score_threshold = 7; // Return points with scores better than this threshold.
+ WithPayloadSelector with_payload = 8; // Options for specifying which payload to include or not
+ optional WithVectorsSelector with_vectors = 9; // Options for specifying which vectors to include into response
+ optional LookupLocation lookup_from = 10; // The location to use for IDs lookup, if not specified - use the current collection and the 'using' vector
+ optional uint64 limit = 11; // Max number of points. Default is 3.
+ optional uint64 group_size = 12; // Maximum amount of points to return per group. Default to 10.
+ string group_by = 13; // Payload field to group by, must be a string or number field. If there are multiple values for the field, all of them will be used. One point can be in multiple groups.
+ optional ReadConsistency read_consistency = 14; // Options for specifying read consistency guarantees
+ optional WithLookup with_lookup = 15; // Options for specifying how to use the group id to lookup points in another collection
+ optional uint64 timeout = 16; // If set, overrides global timeout setting for this request. Unit is seconds.
+ optional ShardKeySelector shard_key_selector = 17; // Specify in which shards to look for the points, if not specified - look in all shards
+}
+
+message FacetValue {
+ oneof variant {
+ string string_value = 1; // String value from the facet
+ }
+}
+
+message FacetValueHit {
+ FacetValue value = 1; // Value from the facet
+ uint64 count = 2; // Number of points with this value
+}
+
+message PointsUpdateOperation {
+ message PointStructList {
+ repeated PointStruct points = 1;
+ optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
+ }
+ message SetPayload {
+    map<string, Value> payload = 1;
+ optional PointsSelector points_selector = 2; // Affected points
+ optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
+ optional string key = 4; // Option for indicate property of payload
+ }
+ message OverwritePayload {
+    map<string, Value> payload = 1;
+ optional PointsSelector points_selector = 2; // Affected points
+ optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
+ optional string key = 4; // Option for indicate property of payload
+ }
+ message DeletePayload {
+ repeated string keys = 1;
+ optional PointsSelector points_selector = 2; // Affected points
+ optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
+ }
+ message UpdateVectors {
+ repeated PointVectors points = 1; // List of points and vectors to update
+ optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
+ }
+ message DeleteVectors {
+ PointsSelector points_selector = 1; // Affected points
+ VectorsSelector vectors = 2; // List of vector names to delete
+ optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
+ }
+ message DeletePoints {
+ PointsSelector points = 1; // Affected points
+ optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
+ }
+ message ClearPayload {
+ PointsSelector points = 1; // Affected points
+ optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
+ }
+
+ oneof operation {
+ PointStructList upsert = 1;
+ PointsSelector delete_deprecated = 2 [deprecated=true];
+ SetPayload set_payload = 3;
+ OverwritePayload overwrite_payload = 4;
+ DeletePayload delete_payload = 5;
+ PointsSelector clear_payload_deprecated = 6 [deprecated=true];
+ UpdateVectors update_vectors = 7;
+ DeleteVectors delete_vectors = 8;
+ DeletePoints delete_points = 9;
+ ClearPayload clear_payload = 10;
+ }
+}
+
+message UpdateBatchPoints {
+ string collection_name = 1; // name of the collection
+ optional bool wait = 2; // Wait until the changes have been applied?
+ repeated PointsUpdateOperation operations = 3;
+ optional WriteOrdering ordering = 4; // Write ordering guarantees
+}
+
+// ---------------------------------------------
+// ---------------- RPC Response ---------------
+// ---------------------------------------------
+
+message PointsOperationResponse {
+ UpdateResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+message UpdateResult {
+ optional uint64 operation_id = 1; // Number of operation
+ UpdateStatus status = 2; // Operation status
+}
+
+enum UpdateStatus {
+ UnknownUpdateStatus = 0;
+ Acknowledged = 1; // Update is received, but not processed yet
+ Completed = 2; // Update is applied and ready for search
+ ClockRejected = 3; // Internal: update is rejected due to an outdated clock
+}
+
+message OrderValue {
+ oneof variant {
+ int64 int = 1;
+ double float = 2;
+ }
+}
+
+message ScoredPoint {
+ PointId id = 1; // Point id
+ map<string, Value> payload = 2; // Payload
+ float score = 3; // Similarity score
+ reserved 4; // deprecated "vector" field
+ uint64 version = 5; // Last update operation applied to this point
+ optional Vectors vectors = 6; // Vectors to search
+ optional ShardKey shard_key = 7; // Shard key
+ optional OrderValue order_value = 8; // Order by value
+}
+
+message GroupId {
+ oneof kind {
+ // Represents an unsigned integer value.
+ uint64 unsigned_value = 1;
+ // Represents a signed integer value.
+ int64 integer_value = 2;
+ // Represents a string value.
+ string string_value = 3;
+ }
+}
+
+message PointGroup {
+ GroupId id = 1; // Group id
+ repeated ScoredPoint hits = 2; // Points in the group
+ RetrievedPoint lookup = 3; // Point(s) from the lookup collection that matches the group id
+}
+
+message GroupsResult {
+ repeated PointGroup groups = 1; // Groups
+}
+
+message SearchResponse {
+ repeated ScoredPoint result = 1;
+ double time = 2; // Time spent to process
+}
+
+message QueryResponse {
+ repeated ScoredPoint result = 1;
+ double time = 2; // Time spent to process
+}
+
+message QueryBatchResponse {
+ repeated BatchResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+message QueryGroupsResponse {
+ GroupsResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+message BatchResult {
+ repeated ScoredPoint result = 1;
+}
+
+message SearchBatchResponse {
+ repeated BatchResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+message SearchGroupsResponse {
+ GroupsResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+message CountResponse {
+ CountResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+message ScrollResponse {
+ optional PointId next_page_offset = 1; // Use this offset for the next query
+ repeated RetrievedPoint result = 2;
+ double time = 3; // Time spent to process
+}
+
+message CountResult {
+ uint64 count = 1;
+}
+
+message RetrievedPoint {
+ PointId id = 1;
+ map<string, Value> payload = 2;
+ reserved 3; // deprecated "vector" field
+ optional Vectors vectors = 4;
+ optional ShardKey shard_key = 5; // Shard key
+ optional OrderValue order_value = 6; // Order-by value
+}
+
+message GetResponse {
+ repeated RetrievedPoint result = 1;
+ double time = 2; // Time spent to process
+}
+
+message RecommendResponse {
+ repeated ScoredPoint result = 1;
+ double time = 2; // Time spent to process
+}
+
+message RecommendBatchResponse {
+ repeated BatchResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+message DiscoverResponse {
+ repeated ScoredPoint result = 1;
+ double time = 2; // Time spent to process
+}
+
+message DiscoverBatchResponse {
+ repeated BatchResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+message RecommendGroupsResponse {
+ GroupsResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+message UpdateBatchResponse {
+ repeated UpdateResult result = 1;
+ double time = 2; // Time spent to process
+}
+
+// ---------------------------------------------
+// ------------- Filter Conditions -------------
+// ---------------------------------------------
+
+message Filter {
+ repeated Condition should = 1; // At least one of those conditions should match
+ repeated Condition must = 2; // All conditions must match
+ repeated Condition must_not = 3; // All conditions must NOT match
+ optional MinShould min_should = 4; // At least minimum amount of given conditions should match
+}
+
+message MinShould {
+ repeated Condition conditions = 1;
+ uint64 min_count = 2;
+}
+
+message Condition {
+ oneof condition_one_of {
+ FieldCondition field = 1;
+ IsEmptyCondition is_empty = 2;
+ HasIdCondition has_id = 3;
+ Filter filter = 4;
+ IsNullCondition is_null = 5;
+ NestedCondition nested = 6;
+ }
+}
+
+message IsEmptyCondition {
+ string key = 1;
+}
+
+message IsNullCondition {
+ string key = 1;
+}
+
+message HasIdCondition {
+ repeated PointId has_id = 1;
+}
+
+message NestedCondition {
+ string key = 1; // Path to nested object
+ Filter filter = 2; // Filter condition
+}
+
+message FieldCondition {
+ string key = 1;
+ Match match = 2; // Check if point has field with a given value
+ Range range = 3; // Check if points value lies in a given range
+ GeoBoundingBox geo_bounding_box = 4; // Check if points geolocation lies in a given area
+ GeoRadius geo_radius = 5; // Check if geo point is within a given radius
+ ValuesCount values_count = 6; // Check number of values for a specific field
+ GeoPolygon geo_polygon = 7; // Check if geo point is within a given polygon
+ DatetimeRange datetime_range = 8; // Check if datetime is within a given range
+}
+
+message Match {
+ oneof match_value {
+ string keyword = 1; // Match string keyword
+ int64 integer = 2; // Match integer
+ bool boolean = 3; // Match boolean
+ string text = 4; // Match text
+ RepeatedStrings keywords = 5; // Match multiple keywords
+ RepeatedIntegers integers = 6; // Match multiple integers
+ RepeatedIntegers except_integers = 7; // Match any other value except those integers
+ RepeatedStrings except_keywords = 8; // Match any other value except those keywords
+ }
+}
+
+message RepeatedStrings {
+ repeated string strings = 1;
+}
+
+message RepeatedIntegers {
+ repeated int64 integers = 1;
+}
+
+message Range {
+ optional double lt = 1;
+ optional double gt = 2;
+ optional double gte = 3;
+ optional double lte = 4;
+}
+
+message DatetimeRange {
+ optional google.protobuf.Timestamp lt = 1;
+ optional google.protobuf.Timestamp gt = 2;
+ optional google.protobuf.Timestamp gte = 3;
+ optional google.protobuf.Timestamp lte = 4;
+}
+
+message GeoBoundingBox {
+ GeoPoint top_left = 1; // north-west corner
+ GeoPoint bottom_right = 2; // south-east corner
+}
+
+message GeoRadius {
+ GeoPoint center = 1; // Center of the circle
+ float radius = 2; // In meters
+}
+
+message GeoLineString {
+ repeated GeoPoint points = 1; // Ordered sequence of GeoPoints representing the line
+}
+
+// For a valid GeoPolygon, both the exterior and interior GeoLineStrings must consist of a minimum of 4 points.
+// Additionally, the first and last points of each GeoLineString must be the same.
+message GeoPolygon {
+ GeoLineString exterior = 1; // The exterior line bounds the surface
+ repeated GeoLineString interiors = 2; // Interior lines (if present) bound holes within the surface
+}
+
+message ValuesCount {
+ optional uint64 lt = 1;
+ optional uint64 gt = 2;
+ optional uint64 gte = 3;
+ optional uint64 lte = 4;
+}
+
+// ---------------------------------------------
+// -------------- Points Selector --------------
+// ---------------------------------------------
+
+message PointsSelector {
+ oneof points_selector_one_of {
+ PointsIdsList points = 1;
+ Filter filter = 2;
+ }
+}
+
+message PointsIdsList {
+ repeated PointId ids = 1;
+}
+
+// ---------------------------------------------
+// ------------------- Point -------------------
+// ---------------------------------------------
+
+
+message PointStruct {
+ PointId id = 1;
+ reserved 2; // deprecated "vector" field
+ map<string, Value> payload = 3;
+ optional Vectors vectors = 4;
+}
+
+
+message GeoPoint {
+ double lon = 1;
+ double lat = 2;
+}
diff --git a/internal/proto/points_service.proto b/internal/proto/points_service.proto
new file mode 100644
index 0000000..e88f0fd
--- /dev/null
+++ b/internal/proto/points_service.proto
@@ -0,0 +1,123 @@
+syntax = "proto3";
+
+import "points.proto";
+
+package qdrant;
+
+service Points {
+ /*
+ Perform insert + updates on points. If a point with a given ID already exists - it will be overwritten.
+ */
+ rpc Upsert (UpsertPoints) returns (PointsOperationResponse) {}
+ /*
+ Delete points
+ */
+ rpc Delete (DeletePoints) returns (PointsOperationResponse) {}
+ /*
+ Retrieve points
+ */
+ rpc Get (GetPoints) returns (GetResponse) {}
+ /*
+ Update named vectors for point
+ */
+ rpc UpdateVectors (UpdatePointVectors) returns (PointsOperationResponse) {}
+ /*
+ Delete named vectors for points
+ */
+ rpc DeleteVectors (DeletePointVectors) returns (PointsOperationResponse) {}
+ /*
+ Set payload for points
+ */
+ rpc SetPayload (SetPayloadPoints) returns (PointsOperationResponse) {}
+ /*
+ Overwrite payload for points
+ */
+ rpc OverwritePayload (SetPayloadPoints) returns (PointsOperationResponse) {}
+ /*
+ Delete specified key payload for points
+ */
+ rpc DeletePayload (DeletePayloadPoints) returns (PointsOperationResponse) {}
+ /*
+ Remove all payload for specified points
+ */
+ rpc ClearPayload (ClearPayloadPoints) returns (PointsOperationResponse) {}
+ /*
+ Create index for field in collection
+ */
+ rpc CreateFieldIndex (CreateFieldIndexCollection) returns (PointsOperationResponse) {}
+ /*
+ Delete field index for collection
+ */
+ rpc DeleteFieldIndex (DeleteFieldIndexCollection) returns (PointsOperationResponse) {}
+ /*
+ Retrieve closest points based on vector similarity and given filtering conditions
+ */
+ rpc Search (SearchPoints) returns (SearchResponse) {}
+ /*
+ Retrieve closest points based on vector similarity and given filtering conditions
+ */
+ rpc SearchBatch (SearchBatchPoints) returns (SearchBatchResponse) {}
+ /*
+ Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given field
+ */
+ rpc SearchGroups (SearchPointGroups) returns (SearchGroupsResponse) {}
+ /*
+ Iterate over all or filtered points
+ */
+ rpc Scroll (ScrollPoints) returns (ScrollResponse) {}
+ /*
+ Look for the points which are closer to stored positive examples and at the same time further to negative examples.
+ */
+ rpc Recommend (RecommendPoints) returns (RecommendResponse) {}
+ /*
+ Look for the points which are closer to stored positive examples and at the same time further to negative examples.
+ */
+ rpc RecommendBatch (RecommendBatchPoints) returns (RecommendBatchResponse) {}
+ /*
+ Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given field
+ */
+ rpc RecommendGroups (RecommendPointGroups) returns (RecommendGroupsResponse) {}
+ /*
+ Use context and a target to find the most similar points to the target, constrained by the context.
+
+ When using only the context (without a target), a special search - called context search - is performed where
+ pairs of points are used to generate a loss that guides the search towards the zone where
+ most positive examples overlap. This means that the score minimizes the scenario of
+ finding a point closer to a negative than to a positive part of a pair.
+
+ Since the score of a context relates to loss, the maximum score a point can get is 0.0,
+ and it becomes normal that many points can have a score of 0.0.
+
+ When using target (with or without context), the score behaves a little different: The
+ integer part of the score represents the rank with respect to the context, while the
+ decimal part of the score relates to the distance to the target. The context part of the score for
+ each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair,
+ and -1 otherwise.
+ */
+ rpc Discover (DiscoverPoints) returns (DiscoverResponse) {}
+ /*
+ Batch request points based on { positive, negative } pairs of examples, and/or a target
+ */
+ rpc DiscoverBatch (DiscoverBatchPoints) returns (DiscoverBatchResponse) {}
+ /*
+ Count points in collection with given filtering conditions
+ */
+ rpc Count (CountPoints) returns (CountResponse) {}
+
+ /*
+ Perform multiple update operations in one request
+ */
+ rpc UpdateBatch (UpdateBatchPoints) returns (UpdateBatchResponse) {}
+ /*
+ Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
+ */
+ rpc Query (QueryPoints) returns (QueryResponse) {}
+ /*
+ Universally query points in a batch fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
+ */
+ rpc QueryBatch (QueryBatchPoints) returns (QueryBatchResponse) {}
+ /*
+ Universally query points in a group fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
+ */
+ rpc QueryGroups (QueryPointGroups) returns (QueryGroupsResponse) {}
+}
diff --git a/internal/proto/qdrant.proto b/internal/proto/qdrant.proto
new file mode 100644
index 0000000..f6990d6
--- /dev/null
+++ b/internal/proto/qdrant.proto
@@ -0,0 +1,19 @@
+syntax = "proto3";
+
+import "collections_service.proto";
+import "points_service.proto";
+import "snapshots_service.proto";
+
+package qdrant;
+
+service Qdrant {
+ rpc HealthCheck (HealthCheckRequest) returns (HealthCheckReply) {}
+}
+
+message HealthCheckRequest {}
+
+message HealthCheckReply {
+ string title = 1;
+ string version = 2;
+ optional string commit = 3;
+}
diff --git a/internal/proto/snapshots_service.proto b/internal/proto/snapshots_service.proto
new file mode 100644
index 0000000..e5a3c1a
--- /dev/null
+++ b/internal/proto/snapshots_service.proto
@@ -0,0 +1,74 @@
+syntax = "proto3";
+
+package qdrant;
+
+import "google/protobuf/timestamp.proto";
+
+service Snapshots {
+ /*
+ Create collection snapshot
+ */
+ rpc Create (CreateSnapshotRequest) returns (CreateSnapshotResponse) {}
+ /*
+ List collection snapshots
+ */
+ rpc List (ListSnapshotsRequest) returns (ListSnapshotsResponse) {}
+ /*
+ Delete collection snapshot
+ */
+ rpc Delete (DeleteSnapshotRequest) returns (DeleteSnapshotResponse) {}
+ /*
+ Create full storage snapshot
+ */
+ rpc CreateFull (CreateFullSnapshotRequest) returns (CreateSnapshotResponse) {}
+ /*
+ List full storage snapshots
+ */
+ rpc ListFull (ListFullSnapshotsRequest) returns (ListSnapshotsResponse) {}
+ /*
+ Delete full storage snapshot
+ */
+ rpc DeleteFull (DeleteFullSnapshotRequest) returns (DeleteSnapshotResponse) {}
+}
+
+message CreateFullSnapshotRequest {}
+
+message ListFullSnapshotsRequest {}
+
+message DeleteFullSnapshotRequest {
+ string snapshot_name = 1; // Name of the full snapshot
+}
+
+message CreateSnapshotRequest {
+ string collection_name = 1; // Name of the collection
+}
+
+message ListSnapshotsRequest {
+ string collection_name = 1; // Name of the collection
+}
+
+message DeleteSnapshotRequest {
+ string collection_name = 1; // Name of the collection
+ string snapshot_name = 2; // Name of the collection snapshot
+}
+
+message SnapshotDescription {
+ string name = 1; // Name of the snapshot
+ google.protobuf.Timestamp creation_time = 2; // Creation time of the snapshot
+ int64 size = 3; // Size of the snapshot in bytes
+ optional string checksum = 4; // SHA256 digest of the snapshot file
+}
+
+message CreateSnapshotResponse {
+ SnapshotDescription snapshot_description = 1;
+ double time = 2; // Time spent to process
+}
+
+message ListSnapshotsResponse {
+ repeated SnapshotDescription snapshot_descriptions = 1;
+ double time = 2; // Time spent to process
+}
+
+message DeleteSnapshotResponse {
+ double time = 1; // Time spent to process
+}
diff --git a/qdrant/client.go b/qdrant/client.go
new file mode 100644
index 0000000..f8098b4
--- /dev/null
+++ b/qdrant/client.go
@@ -0,0 +1,74 @@
+package qdrant
+
+import (
+ "google.golang.org/grpc"
+)
+
+// High-level client for interacting with a Qdrant server.
+type Client struct {
+ grpcClient *GrpcClient
+}
+
+// Instantiates a new client with the given configuration.
+func NewClient(config *Config) (*Client, error) {
+ grpcClient, err := NewGrpcClient(config)
+ if err != nil {
+ return nil, err
+ }
+ return NewClientFromGrpc(grpcClient), nil
+}
+
+// Instantiates a new client with the default configuration.
+// Connects to localhost:6334 with TLS disabled.
+func DefaultClient() (*Client, error) {
+ grpcClient, err := NewDefaultGrpcClient()
+ if err != nil {
+ return nil, err
+ }
+ // err is guaranteed nil here; return an explicit nil for clarity.
+ return NewClientFromGrpc(grpcClient), nil
+}
+
+// Instantiates a new client from an existing gRPC client.
+func NewClientFromGrpc(grpcClient *GrpcClient) *Client {
+ return &Client{
+ grpcClient,
+ }
+}
+
+// Get the underlying gRPC client.
+func (c *Client) GetGrpcClient() *GrpcClient {
+ return c.grpcClient
+}
+
+// Get the low-level client for the collections gRPC service.
+// https://github.com/qdrant/qdrant/blob/master/lib/api/src/grpc/proto/collections_service.proto
+func (c *Client) GetCollectionsClient() CollectionsClient {
+ return c.GetGrpcClient().Collections()
+}
+
+// Get the low-level client for the points gRPC service.
+// https://github.com/qdrant/qdrant/blob/master/lib/api/src/grpc/proto/points_service.proto
+func (c *Client) GetPointsClient() PointsClient {
+ return c.GetGrpcClient().Points()
+}
+
+// Get the low-level client for the snapshots gRPC service.
+// https://github.com/qdrant/qdrant/blob/master/lib/api/src/grpc/proto/snapshots_service.proto
+func (c *Client) GetSnapshotsClient() SnapshotsClient {
+ return c.GetGrpcClient().Snapshots()
+}
+
+// Get the low-level client for the Qdrant gRPC service.
+// https://github.com/qdrant/qdrant/blob/master/lib/api/src/grpc/proto/qdrant.proto
+func (c *Client) GetQdrantClient() QdrantClient {
+ return c.GetGrpcClient().Qdrant()
+}
+
+// Get the underlying *grpc.ClientConn.
+func (c *Client) GetConnection() *grpc.ClientConn {
+ return c.GetGrpcClient().Conn()
+}
+
+func (c *Client) Close() error {
+ return c.grpcClient.Close()
+}
diff --git a/qdrant/collections.go b/qdrant/collections.go
new file mode 100644
index 0000000..190eeac
--- /dev/null
+++ b/qdrant/collections.go
@@ -0,0 +1,296 @@
+package qdrant
+
+import (
+ "context"
+ "errors"
+)
+
+// Checks the existence of a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - collectionName: The name of the collection to check.
+//
+// Returns:
+// - bool: True if the collection exists, false otherwise.
+// - error: An error if the operation fails.
+func (c *Client) CollectionExists(ctx context.Context, collectionName string) (bool, error) {
+ resp, err := c.GetCollectionsClient().CollectionExists(ctx, &CollectionExistsRequest{
+ CollectionName: collectionName,
+ })
+ if err != nil {
+ return false, newQdrantErr(err, "CollectionExists", collectionName)
+ }
+ return resp.GetResult().GetExists(), nil
+}
+
+// Retrieves detailed information about a specified existing collection.
+//
+// Parameters:
+//   - ctx: The context for the request.
+//   - collectionName: The name of the collection to retrieve information for.
+//
+// Returns:
+//   - *CollectionInfo: Detailed information about the collection.
+//   - error: An error if the operation fails.
+func (c *Client) GetCollectionInfo(ctx context.Context, collectionName string) (*CollectionInfo, error) {
+ resp, err := c.GetCollectionsClient().Get(ctx, &GetCollectionInfoRequest{
+ CollectionName: collectionName,
+ })
+ if err != nil {
+ // Use the actual method name so error context matches sibling methods.
+ return nil, newQdrantErr(err, "GetCollectionInfo", collectionName)
+ }
+ return resp.GetResult(), nil
+}
+
+// Retrieves the names of all existing collections.
+//
+// Parameters:
+// - ctx: The context for the request.
+//
+// Returns:
+// - []string: A slice of collection names.
+// - error: An error if the operation fails.
+func (c *Client) ListCollections(ctx context.Context) ([]string, error) {
+ resp, err := c.GetCollectionsClient().List(ctx, &ListCollectionsRequest{})
+ if err != nil {
+ return nil, newQdrantErr(err, "ListCollections")
+ }
+ var collections []string
+ for _, collection := range resp.GetCollections() {
+ collections = append(collections, collection.GetName())
+ }
+ return collections, nil
+}
+
+// Creates a new collection with the given parameters.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The CreateCollection request containing collection parameters.
+//
+// Returns:
+// - error: An error if the operation fails.
+func (c *Client) CreateCollection(ctx context.Context, request *CreateCollection) error {
+ _, err := c.GetCollectionsClient().Create(ctx, request)
+ if err != nil {
+ return newQdrantErr(err, "CreateCollection", request.GetCollectionName())
+ }
+ return nil
+}
+
+// Updates parameters of an existing collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The UpdateCollection request containing updated parameters.
+//
+// Returns:
+// - error: An error if the operation fails.
+func (c *Client) UpdateCollection(ctx context.Context, request *UpdateCollection) error {
+ _, err := c.GetCollectionsClient().Update(ctx, request)
+ if err != nil {
+ return newQdrantErr(err, "UpdateCollection", request.GetCollectionName())
+ }
+ return nil
+}
+
+// Drops a collection and all associated data.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - collectionName: The name of the collection to delete.
+//
+// Returns:
+// - error: An error if the operation fails.
+func (c *Client) DeleteCollection(ctx context.Context, collectionName string) error {
+ res, err := c.GetCollectionsClient().Delete(ctx, &DeleteCollection{
+ CollectionName: collectionName,
+ })
+ if err != nil {
+ return newQdrantErr(err, "DeleteCollection", collectionName)
+ }
+ if !res.GetResult() {
+ return newQdrantErr(errors.New("failed to delete collection"), "DeleteCollection", collectionName)
+ }
+ return nil
+}
+
+// Creates an alias for a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - aliasName: The name of the alias to create.
+// - collectionName: The name of the collection to alias.
+//
+// Returns:
+// - error: An error if the operation fails.
+func (c *Client) CreateAlias(ctx context.Context, aliasName, collectionName string) error {
+ _, err := c.GetCollectionsClient().UpdateAliases(ctx, &ChangeAliases{
+ Actions: []*AliasOperations{
+ {
+ Action: &AliasOperations_CreateAlias{
+ CreateAlias: &CreateAlias{
+ CollectionName: collectionName,
+ AliasName: aliasName,
+ },
+ },
+ },
+ },
+ })
+ if err != nil {
+ return newQdrantErr(err, "CreateAlias", collectionName)
+ }
+ return nil
+}
+
+// Deletes an alias.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - aliasName: The name of the alias to delete.
+//
+// Returns:
+// - error: An error if the operation fails.
+func (c *Client) DeleteAlias(ctx context.Context, aliasName string) error {
+ _, err := c.GetCollectionsClient().UpdateAliases(ctx, &ChangeAliases{
+ Actions: []*AliasOperations{
+ {
+ Action: &AliasOperations_DeleteAlias{
+ DeleteAlias: &DeleteAlias{
+ AliasName: aliasName,
+ },
+ },
+ },
+ },
+ })
+ if err != nil {
+ return newQdrantErr(err, "DeleteAlias", aliasName)
+ }
+ return nil
+}
+
+// Renames an alias.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - oldAliasName: The current name of the alias.
+// - newAliasName: The new name for the alias.
+//
+// Returns:
+// - error: An error if the operation fails.
+func (c *Client) RenameAlias(ctx context.Context, oldAliasName, newAliasName string) error {
+ _, err := c.GetCollectionsClient().UpdateAliases(ctx, &ChangeAliases{
+ Actions: []*AliasOperations{
+ {
+ Action: &AliasOperations_RenameAlias{
+ RenameAlias: &RenameAlias{
+ OldAliasName: oldAliasName,
+ NewAliasName: newAliasName,
+ },
+ },
+ },
+ },
+ })
+ if err != nil {
+ return newQdrantErr(err, "RenameAlias")
+ }
+ return nil
+}
+
+// Lists all aliases for a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - collectionName: The name of the collection to list aliases for.
+//
+// Returns:
+// - []string: A slice of alias names.
+// - error: An error if the operation fails.
+func (c *Client) ListCollectionAliases(ctx context.Context, collectionName string) ([]string, error) {
+ resp, err := c.GetCollectionsClient().ListCollectionAliases(ctx, &ListCollectionAliasesRequest{
+ CollectionName: collectionName,
+ })
+ if err != nil {
+ return nil, newQdrantErr(err, "ListCollectionAliases", collectionName)
+ }
+ var aliases []string
+ for _, alias := range resp.GetAliases() {
+ aliases = append(aliases, alias.GetAliasName())
+ }
+ return aliases, nil
+}
+
+// Lists all aliases.
+//
+// Parameters:
+// - ctx: The context for the request.
+//
+// Returns:
+// - []*AliasDescription: A slice of AliasDescription objects.
+// - error: An error if the operation fails.
+func (c *Client) ListAliases(ctx context.Context) ([]*AliasDescription, error) {
+ resp, err := c.GetCollectionsClient().ListAliases(ctx, &ListAliasesRequest{})
+ if err != nil {
+ return nil, newQdrantErr(err, "ListAliases")
+ }
+ return resp.GetAliases(), nil
+}
+
+// Updates aliases.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - actions: A slice of AliasOperations to perform.
+//
+// Returns:
+// - error: An error if the operation fails.
+func (c *Client) UpdateAliases(ctx context.Context, actions []*AliasOperations) error {
+ _, err := c.GetCollectionsClient().UpdateAliases(ctx, &ChangeAliases{
+ Actions: actions,
+ })
+ if err != nil {
+ return newQdrantErr(err, "UpdateAliases")
+ }
+ return nil
+}
+
+// Creates a shard key for a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - collectionName: The name of the collection to create a shard key for.
+// - request: The CreateShardKey request containing shard key parameters.
+//
+// Returns:
+// - error: An error if the operation fails.
+func (c *Client) CreateShardKey(ctx context.Context, collectionName string, request *CreateShardKey) error {
+ _, err := c.GetCollectionsClient().CreateShardKey(ctx, &CreateShardKeyRequest{
+ CollectionName: collectionName,
+ Request: request,
+ })
+ if err != nil {
+ return newQdrantErr(err, "CreateShardKey", collectionName)
+ }
+ return nil
+}
+
+// Deletes a shard key for a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - collectionName: The name of the collection to delete a shard key from.
+// - request: The DeleteShardKey request containing shard key parameters.
+//
+// Returns:
+// - error: An error if the operation fails.
+func (c *Client) DeleteShardKey(ctx context.Context, collectionName string, request *DeleteShardKey) error {
+ _, err := c.GetCollectionsClient().DeleteShardKey(ctx, &DeleteShardKeyRequest{
+ CollectionName: collectionName,
+ Request: request,
+ })
+ if err != nil {
+ return newQdrantErr(err, "DeleteShardKey", collectionName)
+ }
+ return nil
+}
diff --git a/qdrant/collections.pb.go b/qdrant/collections.pb.go
index 0a5c07d..95b1398 100644
--- a/qdrant/collections.pb.go
+++ b/qdrant/collections.pb.go
@@ -1,10 +1,10 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v4.22.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.1
// source: collections.proto
-package go_client
+package qdrant
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -6712,7 +6712,7 @@ func file_collections_proto_rawDescGZIP() []byte {
var file_collections_proto_enumTypes = make([]protoimpl.EnumInfo, 12)
var file_collections_proto_msgTypes = make([]protoimpl.MessageInfo, 79)
-var file_collections_proto_goTypes = []interface{}{
+var file_collections_proto_goTypes = []any{
(Datatype)(0), // 0: qdrant.Datatype
(Modifier)(0), // 1: qdrant.Modifier
(MultiVectorComparator)(0), // 2: qdrant.MultiVectorComparator
@@ -6916,7 +6916,7 @@ func file_collections_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_collections_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*VectorParams); i {
case 0:
return &v.state
@@ -6928,7 +6928,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*VectorParamsDiff); i {
case 0:
return &v.state
@@ -6940,7 +6940,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*VectorParamsMap); i {
case 0:
return &v.state
@@ -6952,7 +6952,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*VectorParamsDiffMap); i {
case 0:
return &v.state
@@ -6964,7 +6964,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*VectorsConfig); i {
case 0:
return &v.state
@@ -6976,7 +6976,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*VectorsConfigDiff); i {
case 0:
return &v.state
@@ -6988,7 +6988,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*SparseVectorParams); i {
case 0:
return &v.state
@@ -7000,7 +7000,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*SparseVectorConfig); i {
case 0:
return &v.state
@@ -7012,7 +7012,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*MultiVectorConfig); i {
case 0:
return &v.state
@@ -7024,7 +7024,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*GetCollectionInfoRequest); i {
case 0:
return &v.state
@@ -7036,7 +7036,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*CollectionExistsRequest); i {
case 0:
return &v.state
@@ -7048,7 +7048,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*CollectionExists); i {
case 0:
return &v.state
@@ -7060,7 +7060,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*CollectionExistsResponse); i {
case 0:
return &v.state
@@ -7072,7 +7072,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*ListCollectionsRequest); i {
case 0:
return &v.state
@@ -7084,7 +7084,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*CollectionDescription); i {
case 0:
return &v.state
@@ -7096,7 +7096,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*GetCollectionInfoResponse); i {
case 0:
return &v.state
@@ -7108,7 +7108,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[16].Exporter = func(v any, i int) any {
switch v := v.(*ListCollectionsResponse); i {
case 0:
return &v.state
@@ -7120,7 +7120,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[17].Exporter = func(v any, i int) any {
switch v := v.(*OptimizerStatus); i {
case 0:
return &v.state
@@ -7132,7 +7132,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[18].Exporter = func(v any, i int) any {
switch v := v.(*HnswConfigDiff); i {
case 0:
return &v.state
@@ -7144,7 +7144,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[19].Exporter = func(v any, i int) any {
switch v := v.(*SparseIndexConfig); i {
case 0:
return &v.state
@@ -7156,7 +7156,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[20].Exporter = func(v any, i int) any {
switch v := v.(*WalConfigDiff); i {
case 0:
return &v.state
@@ -7168,7 +7168,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[21].Exporter = func(v any, i int) any {
switch v := v.(*OptimizersConfigDiff); i {
case 0:
return &v.state
@@ -7180,7 +7180,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[22].Exporter = func(v any, i int) any {
switch v := v.(*ScalarQuantization); i {
case 0:
return &v.state
@@ -7192,7 +7192,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[23].Exporter = func(v any, i int) any {
switch v := v.(*ProductQuantization); i {
case 0:
return &v.state
@@ -7204,7 +7204,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[24].Exporter = func(v any, i int) any {
switch v := v.(*BinaryQuantization); i {
case 0:
return &v.state
@@ -7216,7 +7216,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[25].Exporter = func(v any, i int) any {
switch v := v.(*QuantizationConfig); i {
case 0:
return &v.state
@@ -7228,7 +7228,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[26].Exporter = func(v any, i int) any {
switch v := v.(*Disabled); i {
case 0:
return &v.state
@@ -7240,7 +7240,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[27].Exporter = func(v any, i int) any {
switch v := v.(*QuantizationConfigDiff); i {
case 0:
return &v.state
@@ -7252,7 +7252,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[28].Exporter = func(v any, i int) any {
switch v := v.(*CreateCollection); i {
case 0:
return &v.state
@@ -7264,7 +7264,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[29].Exporter = func(v any, i int) any {
switch v := v.(*UpdateCollection); i {
case 0:
return &v.state
@@ -7276,7 +7276,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[30].Exporter = func(v any, i int) any {
switch v := v.(*DeleteCollection); i {
case 0:
return &v.state
@@ -7288,7 +7288,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[31].Exporter = func(v any, i int) any {
switch v := v.(*CollectionOperationResponse); i {
case 0:
return &v.state
@@ -7300,7 +7300,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[32].Exporter = func(v any, i int) any {
switch v := v.(*CollectionParams); i {
case 0:
return &v.state
@@ -7312,7 +7312,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[33].Exporter = func(v any, i int) any {
switch v := v.(*CollectionParamsDiff); i {
case 0:
return &v.state
@@ -7324,7 +7324,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[34].Exporter = func(v any, i int) any {
switch v := v.(*CollectionConfig); i {
case 0:
return &v.state
@@ -7336,7 +7336,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[35].Exporter = func(v any, i int) any {
switch v := v.(*KeywordIndexParams); i {
case 0:
return &v.state
@@ -7348,7 +7348,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[36].Exporter = func(v any, i int) any {
switch v := v.(*IntegerIndexParams); i {
case 0:
return &v.state
@@ -7360,7 +7360,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[37].Exporter = func(v any, i int) any {
switch v := v.(*FloatIndexParams); i {
case 0:
return &v.state
@@ -7372,7 +7372,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[38].Exporter = func(v any, i int) any {
switch v := v.(*GeoIndexParams); i {
case 0:
return &v.state
@@ -7384,7 +7384,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[39].Exporter = func(v any, i int) any {
switch v := v.(*TextIndexParams); i {
case 0:
return &v.state
@@ -7396,7 +7396,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[40].Exporter = func(v any, i int) any {
switch v := v.(*BoolIndexParams); i {
case 0:
return &v.state
@@ -7408,7 +7408,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[41].Exporter = func(v any, i int) any {
switch v := v.(*DatetimeIndexParams); i {
case 0:
return &v.state
@@ -7420,7 +7420,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[42].Exporter = func(v any, i int) any {
switch v := v.(*UuidIndexParams); i {
case 0:
return &v.state
@@ -7432,7 +7432,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[43].Exporter = func(v any, i int) any {
switch v := v.(*PayloadIndexParams); i {
case 0:
return &v.state
@@ -7444,7 +7444,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[44].Exporter = func(v any, i int) any {
switch v := v.(*PayloadSchemaInfo); i {
case 0:
return &v.state
@@ -7456,7 +7456,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[45].Exporter = func(v any, i int) any {
switch v := v.(*CollectionInfo); i {
case 0:
return &v.state
@@ -7468,7 +7468,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[46].Exporter = func(v any, i int) any {
switch v := v.(*ChangeAliases); i {
case 0:
return &v.state
@@ -7480,7 +7480,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[47].Exporter = func(v any, i int) any {
switch v := v.(*AliasOperations); i {
case 0:
return &v.state
@@ -7492,7 +7492,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[48].Exporter = func(v any, i int) any {
switch v := v.(*CreateAlias); i {
case 0:
return &v.state
@@ -7504,7 +7504,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[49].Exporter = func(v any, i int) any {
switch v := v.(*RenameAlias); i {
case 0:
return &v.state
@@ -7516,7 +7516,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[50].Exporter = func(v any, i int) any {
switch v := v.(*DeleteAlias); i {
case 0:
return &v.state
@@ -7528,7 +7528,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[51].Exporter = func(v any, i int) any {
switch v := v.(*ListAliasesRequest); i {
case 0:
return &v.state
@@ -7540,7 +7540,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[52].Exporter = func(v any, i int) any {
switch v := v.(*ListCollectionAliasesRequest); i {
case 0:
return &v.state
@@ -7552,7 +7552,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[53].Exporter = func(v any, i int) any {
switch v := v.(*AliasDescription); i {
case 0:
return &v.state
@@ -7564,7 +7564,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[54].Exporter = func(v any, i int) any {
switch v := v.(*ListAliasesResponse); i {
case 0:
return &v.state
@@ -7576,7 +7576,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[55].Exporter = func(v any, i int) any {
switch v := v.(*CollectionClusterInfoRequest); i {
case 0:
return &v.state
@@ -7588,7 +7588,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[56].Exporter = func(v any, i int) any {
switch v := v.(*ShardKey); i {
case 0:
return &v.state
@@ -7600,7 +7600,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[57].Exporter = func(v any, i int) any {
switch v := v.(*LocalShardInfo); i {
case 0:
return &v.state
@@ -7612,7 +7612,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[58].Exporter = func(v any, i int) any {
switch v := v.(*RemoteShardInfo); i {
case 0:
return &v.state
@@ -7624,7 +7624,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[59].Exporter = func(v any, i int) any {
switch v := v.(*ShardTransferInfo); i {
case 0:
return &v.state
@@ -7636,7 +7636,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[60].Exporter = func(v any, i int) any {
switch v := v.(*ReshardingInfo); i {
case 0:
return &v.state
@@ -7648,7 +7648,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[61].Exporter = func(v any, i int) any {
switch v := v.(*CollectionClusterInfoResponse); i {
case 0:
return &v.state
@@ -7660,7 +7660,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[62].Exporter = func(v any, i int) any {
switch v := v.(*MoveShard); i {
case 0:
return &v.state
@@ -7672,7 +7672,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[63].Exporter = func(v any, i int) any {
switch v := v.(*ReplicateShard); i {
case 0:
return &v.state
@@ -7684,7 +7684,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[64].Exporter = func(v any, i int) any {
switch v := v.(*AbortShardTransfer); i {
case 0:
return &v.state
@@ -7696,7 +7696,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[65].Exporter = func(v any, i int) any {
switch v := v.(*RestartTransfer); i {
case 0:
return &v.state
@@ -7708,7 +7708,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[66].Exporter = func(v any, i int) any {
switch v := v.(*Replica); i {
case 0:
return &v.state
@@ -7720,7 +7720,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[67].Exporter = func(v any, i int) any {
switch v := v.(*CreateShardKey); i {
case 0:
return &v.state
@@ -7732,7 +7732,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[68].Exporter = func(v any, i int) any {
switch v := v.(*DeleteShardKey); i {
case 0:
return &v.state
@@ -7744,7 +7744,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[69].Exporter = func(v any, i int) any {
switch v := v.(*UpdateCollectionClusterSetupRequest); i {
case 0:
return &v.state
@@ -7756,7 +7756,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[70].Exporter = func(v any, i int) any {
switch v := v.(*UpdateCollectionClusterSetupResponse); i {
case 0:
return &v.state
@@ -7768,7 +7768,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[71].Exporter = func(v any, i int) any {
switch v := v.(*CreateShardKeyRequest); i {
case 0:
return &v.state
@@ -7780,7 +7780,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[72].Exporter = func(v any, i int) any {
switch v := v.(*DeleteShardKeyRequest); i {
case 0:
return &v.state
@@ -7792,7 +7792,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[73].Exporter = func(v any, i int) any {
switch v := v.(*CreateShardKeyResponse); i {
case 0:
return &v.state
@@ -7804,7 +7804,7 @@ func file_collections_proto_init() {
return nil
}
}
- file_collections_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
+ file_collections_proto_msgTypes[74].Exporter = func(v any, i int) any {
switch v := v.(*DeleteShardKeyResponse); i {
case 0:
return &v.state
@@ -7817,48 +7817,48 @@ func file_collections_proto_init() {
}
}
}
- file_collections_proto_msgTypes[0].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[1].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[4].OneofWrappers = []interface{}{
+ file_collections_proto_msgTypes[0].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[1].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[4].OneofWrappers = []any{
(*VectorsConfig_Params)(nil),
(*VectorsConfig_ParamsMap)(nil),
}
- file_collections_proto_msgTypes[5].OneofWrappers = []interface{}{
+ file_collections_proto_msgTypes[5].OneofWrappers = []any{
(*VectorsConfigDiff_Params)(nil),
(*VectorsConfigDiff_ParamsMap)(nil),
}
- file_collections_proto_msgTypes[6].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[18].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[19].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[20].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[21].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[22].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[23].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[24].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[25].OneofWrappers = []interface{}{
+ file_collections_proto_msgTypes[6].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[18].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[19].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[20].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[21].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[22].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[23].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[24].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[25].OneofWrappers = []any{
(*QuantizationConfig_Scalar)(nil),
(*QuantizationConfig_Product)(nil),
(*QuantizationConfig_Binary)(nil),
}
- file_collections_proto_msgTypes[27].OneofWrappers = []interface{}{
+ file_collections_proto_msgTypes[27].OneofWrappers = []any{
(*QuantizationConfigDiff_Scalar)(nil),
(*QuantizationConfigDiff_Product)(nil),
(*QuantizationConfigDiff_Disabled)(nil),
(*QuantizationConfigDiff_Binary)(nil),
}
- file_collections_proto_msgTypes[28].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[29].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[30].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[32].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[33].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[34].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[35].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[36].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[37].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[39].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[41].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[42].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[43].OneofWrappers = []interface{}{
+ file_collections_proto_msgTypes[28].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[29].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[30].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[32].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[33].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[34].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[35].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[36].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[37].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[39].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[41].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[42].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[43].OneofWrappers = []any{
(*PayloadIndexParams_KeywordIndexParams)(nil),
(*PayloadIndexParams_IntegerIndexParams)(nil),
(*PayloadIndexParams_FloatIndexParams)(nil),
@@ -7868,28 +7868,28 @@ func file_collections_proto_init() {
(*PayloadIndexParams_DatetimeIndexParams)(nil),
(*PayloadIndexParams_UuidIndexParams)(nil),
}
- file_collections_proto_msgTypes[44].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[45].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[46].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[47].OneofWrappers = []interface{}{
+ file_collections_proto_msgTypes[44].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[45].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[46].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[47].OneofWrappers = []any{
(*AliasOperations_CreateAlias)(nil),
(*AliasOperations_RenameAlias)(nil),
(*AliasOperations_DeleteAlias)(nil),
}
- file_collections_proto_msgTypes[56].OneofWrappers = []interface{}{
+ file_collections_proto_msgTypes[56].OneofWrappers = []any{
(*ShardKey_Keyword)(nil),
(*ShardKey_Number)(nil),
}
- file_collections_proto_msgTypes[57].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[58].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[59].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[60].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[62].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[63].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[64].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[65].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[67].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[69].OneofWrappers = []interface{}{
+ file_collections_proto_msgTypes[57].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[58].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[59].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[60].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[62].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[63].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[64].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[65].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[67].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[69].OneofWrappers = []any{
(*UpdateCollectionClusterSetupRequest_MoveShard)(nil),
(*UpdateCollectionClusterSetupRequest_ReplicateShard)(nil),
(*UpdateCollectionClusterSetupRequest_AbortTransfer)(nil),
@@ -7898,8 +7898,8 @@ func file_collections_proto_init() {
(*UpdateCollectionClusterSetupRequest_DeleteShardKey)(nil),
(*UpdateCollectionClusterSetupRequest_RestartTransfer)(nil),
}
- file_collections_proto_msgTypes[71].OneofWrappers = []interface{}{}
- file_collections_proto_msgTypes[72].OneofWrappers = []interface{}{}
+ file_collections_proto_msgTypes[71].OneofWrappers = []any{}
+ file_collections_proto_msgTypes[72].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/qdrant/collections_service.pb.go b/qdrant/collections_service.pb.go
index 2eed4af..7b66ffe 100644
--- a/qdrant/collections_service.pb.go
+++ b/qdrant/collections_service.pb.go
@@ -1,10 +1,10 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v4.22.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.1
// source: collections_service.proto
-package go_client
+package qdrant
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -99,7 +99,7 @@ var file_collections_service_proto_rawDesc = []byte{
0x74, 0x6f, 0x33,
}
-var file_collections_service_proto_goTypes = []interface{}{
+var file_collections_service_proto_goTypes = []any{
(*GetCollectionInfoRequest)(nil), // 0: qdrant.GetCollectionInfoRequest
(*ListCollectionsRequest)(nil), // 1: qdrant.ListCollectionsRequest
(*CreateCollection)(nil), // 2: qdrant.CreateCollection
diff --git a/qdrant/collections_service_grpc.pb.go b/qdrant/collections_service_grpc.pb.go
index dfa0499..79a0fa7 100644
--- a/qdrant/collections_service_grpc.pb.go
+++ b/qdrant/collections_service_grpc.pb.go
@@ -1,22 +1,36 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v4.22.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v4.25.1
// source: collections_service.proto
-package go_client
+package qdrant
import (
context "context"
grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ Collections_Get_FullMethodName = "/qdrant.Collections/Get"
+ Collections_List_FullMethodName = "/qdrant.Collections/List"
+ Collections_Create_FullMethodName = "/qdrant.Collections/Create"
+ Collections_Update_FullMethodName = "/qdrant.Collections/Update"
+ Collections_Delete_FullMethodName = "/qdrant.Collections/Delete"
+ Collections_UpdateAliases_FullMethodName = "/qdrant.Collections/UpdateAliases"
+ Collections_ListCollectionAliases_FullMethodName = "/qdrant.Collections/ListCollectionAliases"
+ Collections_ListAliases_FullMethodName = "/qdrant.Collections/ListAliases"
+ Collections_CollectionClusterInfo_FullMethodName = "/qdrant.Collections/CollectionClusterInfo"
+ Collections_CollectionExists_FullMethodName = "/qdrant.Collections/CollectionExists"
+ Collections_UpdateCollectionClusterSetup_FullMethodName = "/qdrant.Collections/UpdateCollectionClusterSetup"
+ Collections_CreateShardKey_FullMethodName = "/qdrant.Collections/CreateShardKey"
+ Collections_DeleteShardKey_FullMethodName = "/qdrant.Collections/DeleteShardKey"
+)
// CollectionsClient is the client API for Collections service.
//
@@ -59,8 +73,9 @@ func NewCollectionsClient(cc grpc.ClientConnInterface) CollectionsClient {
}
func (c *collectionsClient) Get(ctx context.Context, in *GetCollectionInfoRequest, opts ...grpc.CallOption) (*GetCollectionInfoResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetCollectionInfoResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/Get", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_Get_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -68,8 +83,9 @@ func (c *collectionsClient) Get(ctx context.Context, in *GetCollectionInfoReques
}
func (c *collectionsClient) List(ctx context.Context, in *ListCollectionsRequest, opts ...grpc.CallOption) (*ListCollectionsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListCollectionsResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/List", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_List_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -77,8 +93,9 @@ func (c *collectionsClient) List(ctx context.Context, in *ListCollectionsRequest
}
func (c *collectionsClient) Create(ctx context.Context, in *CreateCollection, opts ...grpc.CallOption) (*CollectionOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CollectionOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/Create", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_Create_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -86,8 +103,9 @@ func (c *collectionsClient) Create(ctx context.Context, in *CreateCollection, op
}
func (c *collectionsClient) Update(ctx context.Context, in *UpdateCollection, opts ...grpc.CallOption) (*CollectionOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CollectionOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/Update", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_Update_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -95,8 +113,9 @@ func (c *collectionsClient) Update(ctx context.Context, in *UpdateCollection, op
}
func (c *collectionsClient) Delete(ctx context.Context, in *DeleteCollection, opts ...grpc.CallOption) (*CollectionOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CollectionOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/Delete", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_Delete_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -104,8 +123,9 @@ func (c *collectionsClient) Delete(ctx context.Context, in *DeleteCollection, op
}
func (c *collectionsClient) UpdateAliases(ctx context.Context, in *ChangeAliases, opts ...grpc.CallOption) (*CollectionOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CollectionOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/UpdateAliases", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_UpdateAliases_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -113,8 +133,9 @@ func (c *collectionsClient) UpdateAliases(ctx context.Context, in *ChangeAliases
}
func (c *collectionsClient) ListCollectionAliases(ctx context.Context, in *ListCollectionAliasesRequest, opts ...grpc.CallOption) (*ListAliasesResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListAliasesResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/ListCollectionAliases", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_ListCollectionAliases_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -122,8 +143,9 @@ func (c *collectionsClient) ListCollectionAliases(ctx context.Context, in *ListC
}
func (c *collectionsClient) ListAliases(ctx context.Context, in *ListAliasesRequest, opts ...grpc.CallOption) (*ListAliasesResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListAliasesResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/ListAliases", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_ListAliases_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -131,8 +153,9 @@ func (c *collectionsClient) ListAliases(ctx context.Context, in *ListAliasesRequ
}
func (c *collectionsClient) CollectionClusterInfo(ctx context.Context, in *CollectionClusterInfoRequest, opts ...grpc.CallOption) (*CollectionClusterInfoResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CollectionClusterInfoResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/CollectionClusterInfo", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_CollectionClusterInfo_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -140,8 +163,9 @@ func (c *collectionsClient) CollectionClusterInfo(ctx context.Context, in *Colle
}
func (c *collectionsClient) CollectionExists(ctx context.Context, in *CollectionExistsRequest, opts ...grpc.CallOption) (*CollectionExistsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CollectionExistsResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/CollectionExists", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_CollectionExists_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -149,8 +173,9 @@ func (c *collectionsClient) CollectionExists(ctx context.Context, in *Collection
}
func (c *collectionsClient) UpdateCollectionClusterSetup(ctx context.Context, in *UpdateCollectionClusterSetupRequest, opts ...grpc.CallOption) (*UpdateCollectionClusterSetupResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(UpdateCollectionClusterSetupResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/UpdateCollectionClusterSetup", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_UpdateCollectionClusterSetup_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -158,8 +183,9 @@ func (c *collectionsClient) UpdateCollectionClusterSetup(ctx context.Context, in
}
func (c *collectionsClient) CreateShardKey(ctx context.Context, in *CreateShardKeyRequest, opts ...grpc.CallOption) (*CreateShardKeyResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreateShardKeyResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/CreateShardKey", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_CreateShardKey_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -167,397 +193,11 @@ func (c *collectionsClient) CreateShardKey(ctx context.Context, in *CreateShardK
}
func (c *collectionsClient) DeleteShardKey(ctx context.Context, in *DeleteShardKeyRequest, opts ...grpc.CallOption) (*DeleteShardKeyResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteShardKeyResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Collections/DeleteShardKey", in, out, opts...)
+ err := c.cc.Invoke(ctx, Collections_DeleteShardKey_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
-
-// CollectionsServer is the server API for Collections service.
-// All implementations must embed UnimplementedCollectionsServer
-// for forward compatibility
-type CollectionsServer interface {
- // Get detailed information about specified existing collection
- Get(context.Context, *GetCollectionInfoRequest) (*GetCollectionInfoResponse, error)
- // Get list name of all existing collections
- List(context.Context, *ListCollectionsRequest) (*ListCollectionsResponse, error)
- // Create new collection with given parameters
- Create(context.Context, *CreateCollection) (*CollectionOperationResponse, error)
- // Update parameters of the existing collection
- Update(context.Context, *UpdateCollection) (*CollectionOperationResponse, error)
- // Drop collection and all associated data
- Delete(context.Context, *DeleteCollection) (*CollectionOperationResponse, error)
- // Update Aliases of the existing collection
- UpdateAliases(context.Context, *ChangeAliases) (*CollectionOperationResponse, error)
- // Get list of all aliases for a collection
- ListCollectionAliases(context.Context, *ListCollectionAliasesRequest) (*ListAliasesResponse, error)
- // Get list of all aliases for all existing collections
- ListAliases(context.Context, *ListAliasesRequest) (*ListAliasesResponse, error)
- // Get cluster information for a collection
- CollectionClusterInfo(context.Context, *CollectionClusterInfoRequest) (*CollectionClusterInfoResponse, error)
- // Check the existence of a collection
- CollectionExists(context.Context, *CollectionExistsRequest) (*CollectionExistsResponse, error)
- // Update cluster setup for a collection
- UpdateCollectionClusterSetup(context.Context, *UpdateCollectionClusterSetupRequest) (*UpdateCollectionClusterSetupResponse, error)
- // Create shard key
- CreateShardKey(context.Context, *CreateShardKeyRequest) (*CreateShardKeyResponse, error)
- // Delete shard key
- DeleteShardKey(context.Context, *DeleteShardKeyRequest) (*DeleteShardKeyResponse, error)
- mustEmbedUnimplementedCollectionsServer()
-}
-
-// UnimplementedCollectionsServer must be embedded to have forward compatible implementations.
-type UnimplementedCollectionsServer struct {
-}
-
-func (UnimplementedCollectionsServer) Get(context.Context, *GetCollectionInfoRequest) (*GetCollectionInfoResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
-}
-func (UnimplementedCollectionsServer) List(context.Context, *ListCollectionsRequest) (*ListCollectionsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
-}
-func (UnimplementedCollectionsServer) Create(context.Context, *CreateCollection) (*CollectionOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
-}
-func (UnimplementedCollectionsServer) Update(context.Context, *UpdateCollection) (*CollectionOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Update not implemented")
-}
-func (UnimplementedCollectionsServer) Delete(context.Context, *DeleteCollection) (*CollectionOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
-}
-func (UnimplementedCollectionsServer) UpdateAliases(context.Context, *ChangeAliases) (*CollectionOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateAliases not implemented")
-}
-func (UnimplementedCollectionsServer) ListCollectionAliases(context.Context, *ListCollectionAliasesRequest) (*ListAliasesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListCollectionAliases not implemented")
-}
-func (UnimplementedCollectionsServer) ListAliases(context.Context, *ListAliasesRequest) (*ListAliasesResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListAliases not implemented")
-}
-func (UnimplementedCollectionsServer) CollectionClusterInfo(context.Context, *CollectionClusterInfoRequest) (*CollectionClusterInfoResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CollectionClusterInfo not implemented")
-}
-func (UnimplementedCollectionsServer) CollectionExists(context.Context, *CollectionExistsRequest) (*CollectionExistsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CollectionExists not implemented")
-}
-func (UnimplementedCollectionsServer) UpdateCollectionClusterSetup(context.Context, *UpdateCollectionClusterSetupRequest) (*UpdateCollectionClusterSetupResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateCollectionClusterSetup not implemented")
-}
-func (UnimplementedCollectionsServer) CreateShardKey(context.Context, *CreateShardKeyRequest) (*CreateShardKeyResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateShardKey not implemented")
-}
-func (UnimplementedCollectionsServer) DeleteShardKey(context.Context, *DeleteShardKeyRequest) (*DeleteShardKeyResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteShardKey not implemented")
-}
-func (UnimplementedCollectionsServer) mustEmbedUnimplementedCollectionsServer() {}
-
-// UnsafeCollectionsServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to CollectionsServer will
-// result in compilation errors.
-type UnsafeCollectionsServer interface {
- mustEmbedUnimplementedCollectionsServer()
-}
-
-func RegisterCollectionsServer(s grpc.ServiceRegistrar, srv CollectionsServer) {
- s.RegisterService(&Collections_ServiceDesc, srv)
-}
-
-func _Collections_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetCollectionInfoRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).Get(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/Get",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).Get(ctx, req.(*GetCollectionInfoRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListCollectionsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).List(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/List",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).List(ctx, req.(*ListCollectionsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateCollection)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).Create(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/Create",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).Create(ctx, req.(*CreateCollection))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_Update_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateCollection)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).Update(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/Update",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).Update(ctx, req.(*UpdateCollection))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteCollection)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).Delete(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/Delete",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).Delete(ctx, req.(*DeleteCollection))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_UpdateAliases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ChangeAliases)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).UpdateAliases(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/UpdateAliases",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).UpdateAliases(ctx, req.(*ChangeAliases))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_ListCollectionAliases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListCollectionAliasesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).ListCollectionAliases(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/ListCollectionAliases",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).ListCollectionAliases(ctx, req.(*ListCollectionAliasesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_ListAliases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListAliasesRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).ListAliases(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/ListAliases",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).ListAliases(ctx, req.(*ListAliasesRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_CollectionClusterInfo_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CollectionClusterInfoRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).CollectionClusterInfo(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/CollectionClusterInfo",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).CollectionClusterInfo(ctx, req.(*CollectionClusterInfoRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_CollectionExists_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CollectionExistsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).CollectionExists(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/CollectionExists",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).CollectionExists(ctx, req.(*CollectionExistsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_UpdateCollectionClusterSetup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateCollectionClusterSetupRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).UpdateCollectionClusterSetup(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/UpdateCollectionClusterSetup",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).UpdateCollectionClusterSetup(ctx, req.(*UpdateCollectionClusterSetupRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_CreateShardKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateShardKeyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).CreateShardKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/CreateShardKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).CreateShardKey(ctx, req.(*CreateShardKeyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Collections_DeleteShardKey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteShardKeyRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(CollectionsServer).DeleteShardKey(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Collections/DeleteShardKey",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(CollectionsServer).DeleteShardKey(ctx, req.(*DeleteShardKeyRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// Collections_ServiceDesc is the grpc.ServiceDesc for Collections service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var Collections_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "qdrant.Collections",
- HandlerType: (*CollectionsServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Get",
- Handler: _Collections_Get_Handler,
- },
- {
- MethodName: "List",
- Handler: _Collections_List_Handler,
- },
- {
- MethodName: "Create",
- Handler: _Collections_Create_Handler,
- },
- {
- MethodName: "Update",
- Handler: _Collections_Update_Handler,
- },
- {
- MethodName: "Delete",
- Handler: _Collections_Delete_Handler,
- },
- {
- MethodName: "UpdateAliases",
- Handler: _Collections_UpdateAliases_Handler,
- },
- {
- MethodName: "ListCollectionAliases",
- Handler: _Collections_ListCollectionAliases_Handler,
- },
- {
- MethodName: "ListAliases",
- Handler: _Collections_ListAliases_Handler,
- },
- {
- MethodName: "CollectionClusterInfo",
- Handler: _Collections_CollectionClusterInfo_Handler,
- },
- {
- MethodName: "CollectionExists",
- Handler: _Collections_CollectionExists_Handler,
- },
- {
- MethodName: "UpdateCollectionClusterSetup",
- Handler: _Collections_UpdateCollectionClusterSetup_Handler,
- },
- {
- MethodName: "CreateShardKey",
- Handler: _Collections_CreateShardKey_Handler,
- },
- {
- MethodName: "DeleteShardKey",
- Handler: _Collections_DeleteShardKey_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "collections_service.proto",
-}
diff --git a/qdrant/conditions.go b/qdrant/conditions.go
new file mode 100644
index 0000000..21d6f1c
--- /dev/null
+++ b/qdrant/conditions.go
@@ -0,0 +1,321 @@
+// This file contains constructor functions for creating conditions to be used in filters.
+// https://qdrant.tech/documentation/concepts/filtering/#filtering-conditions
+
+package qdrant
+
+// Creates a condition that matches an exact keyword in a specified field.
+// This is an alias for NewMatchKeyword().
+// See: https://qdrant.tech/documentation/concepts/filtering/#match
+func NewMatch(field, keyword string) *Condition {
+ return NewMatchKeyword(field, keyword)
+}
+
+// Creates a condition that matches an exact keyword in a specified field.
+// See: https://qdrant.tech/documentation/concepts/filtering/#match
+func NewMatchKeyword(field, keyword string) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ Match: &Match{
+ MatchValue: &Match_Keyword{Keyword: keyword},
+ },
+ },
+ },
+ }
+}
+
+// Creates a condition to match a specific substring, token or phrase.
+// Exact texts that will match the condition depend on full-text index configuration.
+// See: https://qdrant.tech/documentation/concepts/filtering/#full-text-match
+func NewMatchText(field, text string) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ Match: &Match{
+ MatchValue: &Match_Text{Text: text},
+ },
+ },
+ },
+ }
+}
+
+// Creates a condition that matches a boolean value in a specified field.
+// See: https://qdrant.tech/documentation/concepts/filtering/#match
+func NewMatchBool(field string, value bool) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ Match: &Match{
+ MatchValue: &Match_Boolean{Boolean: value},
+ },
+ },
+ },
+ }
+}
+
+// Creates a condition that matches an integer value in a specified field.
+// See: https://qdrant.tech/documentation/concepts/filtering/#match
+func NewMatchInt(field string, value int64) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ Match: &Match{
+ MatchValue: &Match_Integer{Integer: value},
+ },
+ },
+ },
+ }
+}
+
+// Creates a condition that matches any of the given keywords in a specified field.
+// See: https://qdrant.tech/documentation/concepts/filtering/#match-any
+func NewMatchKeywords(field string, keywords ...string) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ Match: &Match{
+ MatchValue: &Match_Keywords{Keywords: &RepeatedStrings{
+ Strings: keywords,
+ }},
+ },
+ },
+ },
+ }
+}
+
+// Creates a condition that matches any of the given integer values in a specified field.
+// See: https://qdrant.tech/documentation/concepts/filtering/#match-any
+func NewMatchInts(field string, values ...int64) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ Match: &Match{
+ MatchValue: &Match_Integers{Integers: &RepeatedIntegers{
+ Integers: values,
+ }},
+ },
+ },
+ },
+ }
+}
+
+// Creates a condition that matches any value except the given keywords in a specified field.
+// This is an alias for NewMatchExceptKeywords.
+// See: https://qdrant.tech/documentation/concepts/filtering/#match-except
+func NewMatchExcept(field string, keywords ...string) *Condition {
+ return NewMatchExceptKeywords(field, keywords...)
+}
+
+// Creates a condition that matches any value except the given keywords in a specified field.
+// See: https://qdrant.tech/documentation/concepts/filtering/#match-except
+func NewMatchExceptKeywords(field string, keywords ...string) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ Match: &Match{
+ MatchValue: &Match_ExceptKeywords{ExceptKeywords: &RepeatedStrings{
+ Strings: keywords,
+ }},
+ },
+ },
+ },
+ }
+}
+
+// Creates a condition that matches any value except the given integer values in a specified field.
+// See: https://qdrant.tech/documentation/concepts/filtering/#match-except
+func NewMatchExceptInts(field string, values ...int64) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ Match: &Match{
+ MatchValue: &Match_ExceptIntegers{ExceptIntegers: &RepeatedIntegers{
+ Integers: values,
+ }},
+ },
+ },
+ },
+ }
+}
+
+// Creates a condition that checks if a specified field is null.
+// See: https://qdrant.tech/documentation/concepts/filtering/#is-null
+func NewIsNull(field string) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_IsNull{
+ IsNull: &IsNullCondition{
+ Key: field,
+ },
+ },
+ }
+}
+
+// Creates a condition that checks if a specified field is empty.
+// See: https://qdrant.tech/documentation/concepts/filtering/#is-empty
+func NewIsEmpty(field string) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_IsEmpty{
+ IsEmpty: &IsEmptyCondition{
+ Key: field,
+ },
+ },
+ }
+}
+
+// Creates a condition that checks if a point has any of the specified IDs.
+// See: https://qdrant.tech/documentation/concepts/filtering/#has-id
+func NewHasID(ids ...*PointId) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_HasId{
+ HasId: &HasIdCondition{
+ HasId: ids,
+ },
+ },
+ }
+}
+
+// Creates a nested condition for filtering on nested fields.
+// See: https://qdrant.tech/documentation/concepts/filtering/#nested
+func NewNestedCondition(field string, condition *Condition) *Condition {
+	filter := &Filter{
+		Must: []*Condition{condition},
+ }
+
+ return &Condition{
+ ConditionOneOf: &Condition_Nested{
+ Nested: &NestedCondition{
+ Key: field,
+ Filter: filter,
+ },
+ },
+ }
+}
+
+// Creates a nested filter for filtering on nested fields.
+// See: https://qdrant.tech/documentation/concepts/filtering/#nested
+func NewNestedFilter(field string, filter *Filter) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Nested{
+ Nested: &NestedCondition{
+ Key: field,
+ Filter: filter,
+ },
+ },
+ }
+}
+
+// Creates a condition from a filter.
+// This is useful for creating complex nested conditions.
+func NewFilterAsCondition(filter *Filter) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Filter{
+ Filter: filter,
+ },
+ }
+}
+
+// Creates a range condition for numeric or date fields.
+// See: https://qdrant.tech/documentation/concepts/filtering/#range
+func NewRange(field string, rangeVal *Range) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ Range: rangeVal,
+ },
+ },
+ }
+}
+
+// Creates a geo filter condition that matches points within a specified radius from a center point.
+// See: https://qdrant.tech/documentation/concepts/filtering/#geo-radius
+func NewGeoRadius(field string, lat, long float64, radius float32) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ GeoRadius: &GeoRadius{
+ Radius: radius,
+ Center: &GeoPoint{
+ Lat: lat,
+ Lon: long,
+ },
+ },
+ },
+ },
+ }
+}
+
+// Creates a geo filter condition that matches points within a specified bounding box.
+// See: https://qdrant.tech/documentation/concepts/filtering/#geo-bounding-box
+func NewGeoBoundingBox(field string, topLeftLat, topLeftLon, bottomRightLat, bottomRightLon float64) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ GeoBoundingBox: &GeoBoundingBox{
+ TopLeft: &GeoPoint{
+ Lat: topLeftLat,
+ Lon: topLeftLon,
+ },
+ BottomRight: &GeoPoint{
+ Lat: bottomRightLat,
+ Lon: bottomRightLon,
+ },
+ },
+ },
+ },
+ }
+}
+
+// Creates a geo filter condition that matches points within a specified polygon.
+// See: https://qdrant.tech/documentation/concepts/filtering/#geo-polygon
+func NewGeoPolygon(field string, exterior *GeoLineString, interior ...*GeoLineString) *Condition {
+ geoPolygon := &GeoPolygon{
+ Exterior: exterior,
+ Interiors: interior,
+ }
+
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ GeoPolygon: geoPolygon,
+ },
+ },
+ }
+}
+
+// Creates a condition that filters based on the number of values in an array field.
+// See: https://qdrant.tech/documentation/concepts/filtering/#values-count
+func NewValuesCount(field string, valuesCount *ValuesCount) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ ValuesCount: valuesCount,
+ },
+ },
+ }
+}
+
+// Creates a condition that filters based on a datetime range.
+// See: https://qdrant.tech/documentation/concepts/filtering/#datetime-range
+func NewDatetimeRange(field string, dateTimeRange *DatetimeRange) *Condition {
+ return &Condition{
+ ConditionOneOf: &Condition_Field{
+ Field: &FieldCondition{
+ Key: field,
+ DatetimeRange: dateTimeRange,
+ },
+ },
+ }
+}
diff --git a/qdrant/config.go b/qdrant/config.go
new file mode 100644
index 0000000..b53cf11
--- /dev/null
+++ b/qdrant/config.go
@@ -0,0 +1,77 @@
+package qdrant
+
+import (
+ "context"
+ "crypto/tls"
+ "fmt"
+ "log/slog"
+
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/credentials"
+ "google.golang.org/grpc/credentials/insecure"
+ "google.golang.org/grpc/metadata"
+)
+
+const (
+ apiKeyHeader = "api-key"
+ defaultHost = "localhost"
+ defaultPort = 6334
+)
+
+// Configuration options for the client.
+type Config struct {
+ // Hostname of the Qdrant server. Defaults to "localhost".
+ Host string
+ // gRPC port of the Qdrant server. Defaults to 6334.
+ Port int
+ // API key to use for authentication. Defaults to "".
+ APIKey string
+ // Whether to use TLS for the connection. Defaults to false.
+ UseTLS bool
+ // TLS configuration to use for the connection.
+ // If not provided, uses default config with minimum TLS version set to 1.3
+ TLSConfig *tls.Config
+ // Additional gRPC options to use for the connection.
+ GrpcOptions []grpc.DialOption
+}
+
+// Internal method.
+func (c *Config) getAddr() string {
+ host := c.Host
+ if host == "" {
+ host = defaultHost
+ }
+ port := c.Port
+ if port == 0 {
+ port = defaultPort
+ }
+ return fmt.Sprintf("%s:%d", host, port)
+}
+
+// Internal method.
+func (c *Config) getTransportCreds() grpc.DialOption {
+ if c.UseTLS {
+ if c.TLSConfig == nil {
+ return grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{
+ MinVersion: tls.VersionTLS13,
+ }))
+ }
+ return grpc.WithTransportCredentials(credentials.NewTLS(c.TLSConfig))
+ } else if c.APIKey != "" {
+ slog.Default().Warn("API key is being used without TLS(HTTPS). It will be transmitted in plaintext.")
+ }
+ return grpc.WithTransportCredentials(insecure.NewCredentials())
+}
+
+// Internal method.
+//
+//nolint:lll
+func (c *Config) getAPIKeyInterceptor() grpc.DialOption {
+ return grpc.WithUnaryInterceptor(func(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+ newCtx := ctx
+ if c.APIKey != "" {
+ newCtx = metadata.AppendToOutgoingContext(ctx, apiKeyHeader, c.APIKey)
+ }
+ return invoker(newCtx, method, req, reply, cc, opts...)
+ })
+}
diff --git a/qdrant/doc.go b/qdrant/doc.go
new file mode 100644
index 0000000..b14f155
--- /dev/null
+++ b/qdrant/doc.go
@@ -0,0 +1,16 @@
+/*
+# Qdrant Go Client
+
+Package provides a client for interfacing with the Qdrant - https://qdrant.tech/ gRPC API.
+
+# Documentation
+
+- Usage examples are available throughout the [Qdrant documentation] and [API Reference]
+
+- [Godoc Reference]
+
+[Qdrant documentation]: https://qdrant.tech/documentation/
+[API Reference]: https://api.qdrant.tech/
+[Godoc Reference]: https://pkg.go.dev/github.com/qdrant/go-client
+*/
+package qdrant
diff --git a/qdrant/error.go b/qdrant/error.go
new file mode 100644
index 0000000..469364b
--- /dev/null
+++ b/qdrant/error.go
@@ -0,0 +1,35 @@
+package qdrant
+
+import (
+ "fmt"
+ "strings"
+)
+
+//nolint:revive // The linter says qdrant.QdrantError stutters, but it's an apt name.
+type QdrantError struct {
+ operationName string
+ context string
+ err error
+}
+
+// Error returns the error as string.
+func (e *QdrantError) Error() string {
+ if e.context == "" {
+ return fmt.Sprintf("%s() failed: %v", e.operationName, e.err)
+ }
+ return fmt.Sprintf("%s() failed: %s: %v", e.operationName, e.context, e.err)
+}
+
+// Unwrap returns the inner error.
+func (e *QdrantError) Unwrap() error {
+ return e.err
+}
+
+func newQdrantErr(err error, operationName string, contexts ...string) *QdrantError {
+ combinedContext := strings.Join(contexts, ": ")
+ return &QdrantError{
+ operationName: operationName,
+ err: err,
+ context: combinedContext,
+ }
+}
diff --git a/qdrant/grpc_client.go b/qdrant/grpc_client.go
new file mode 100644
index 0000000..f38dc6c
--- /dev/null
+++ b/qdrant/grpc_client.go
@@ -0,0 +1,86 @@
+package qdrant
+
+import (
+ "google.golang.org/grpc"
+)
+
+// Lower level client for Qdrant gRPC API.
+type GrpcClient struct {
+ conn *grpc.ClientConn
+ // Qdrant service interface
+ // https://github.com/qdrant/qdrant/blob/master/lib/api/src/grpc/proto/qdrant.proto
+ qdrant QdrantClient
+ // Collections service interface
+ // https://github.com/qdrant/qdrant/blob/master/lib/api/src/grpc/proto/collections_service.proto
+ collections CollectionsClient
+ // Points service interface
+ // https://github.com/qdrant/qdrant/blob/master/lib/api/src/grpc/proto/points_service.proto
+ points PointsClient
+ // Snapshots service interface
+ // https://github.com/qdrant/qdrant/blob/master/lib/api/src/grpc/proto/snapshots_service.proto
+ snapshots SnapshotsClient
+}
+
+// Create a new gRPC client with default configuration.
+func NewDefaultGrpcClient() (*GrpcClient, error) {
+ return NewGrpcClient(&Config{})
+}
+
+// Create a new gRPC client with custom configuration.
+func NewGrpcClient(config *Config) (*GrpcClient, error) {
+	// Prepend our options into a fresh slice so that the user's explicit
+	// options take precedence and config.GrpcOptions is never mutated.
+	opts := append([]grpc.DialOption{
+		config.getTransportCreds(),
+		config.getAPIKeyInterceptor(),
+	}, config.GrpcOptions...)
+
+	conn, err := grpc.NewClient(config.getAddr(), opts...)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return NewGrpcClientFromConn(conn), nil
+}
+
+// Create a new gRPC client from existing connection.
+func NewGrpcClientFromConn(conn *grpc.ClientConn) *GrpcClient {
+ return &GrpcClient{
+ conn: conn,
+ qdrant: NewQdrantClient(conn),
+ points: NewPointsClient(conn),
+ collections: NewCollectionsClient(conn),
+ snapshots: NewSnapshotsClient(conn),
+ }
+}
+
+// Get the underlying gRPC connection.
+func (c *GrpcClient) Conn() *grpc.ClientConn {
+ return c.conn
+}
+
+// Get the Qdrant service interface.
+func (c *GrpcClient) Qdrant() QdrantClient {
+ return c.qdrant
+}
+
+// Get the Points service interface.
+func (c *GrpcClient) Points() PointsClient {
+	return c.points
+}
+
+// Get the Collections service interface.
+func (c *GrpcClient) Collections() CollectionsClient {
+	return c.collections
+}
+
+// Get the Snapshots service interface.
+func (c *GrpcClient) Snapshots() SnapshotsClient {
+ return c.snapshots
+}
+
+// Tears down the *grpc.ClientConn and all underlying connections.
+func (c *GrpcClient) Close() error {
+ return c.conn.Close()
+}
diff --git a/qdrant/json_with_int.pb.go b/qdrant/json_with_int.pb.go
index 015f7a0..7106b9e 100644
--- a/qdrant/json_with_int.pb.go
+++ b/qdrant/json_with_int.pb.go
@@ -2,11 +2,11 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v4.22.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.1
// source: json_with_int.proto
-package go_client
+package qdrant
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -398,7 +398,7 @@ func file_json_with_int_proto_rawDescGZIP() []byte {
var file_json_with_int_proto_enumTypes = make([]protoimpl.EnumInfo, 1)
var file_json_with_int_proto_msgTypes = make([]protoimpl.MessageInfo, 4)
-var file_json_with_int_proto_goTypes = []interface{}{
+var file_json_with_int_proto_goTypes = []any{
(NullValue)(0), // 0: qdrant.NullValue
(*Struct)(nil), // 1: qdrant.Struct
(*Value)(nil), // 2: qdrant.Value
@@ -425,7 +425,7 @@ func file_json_with_int_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_json_with_int_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_json_with_int_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*Struct); i {
case 0:
return &v.state
@@ -437,7 +437,7 @@ func file_json_with_int_proto_init() {
return nil
}
}
- file_json_with_int_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_json_with_int_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*Value); i {
case 0:
return &v.state
@@ -449,7 +449,7 @@ func file_json_with_int_proto_init() {
return nil
}
}
- file_json_with_int_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_json_with_int_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*ListValue); i {
case 0:
return &v.state
@@ -462,7 +462,7 @@ func file_json_with_int_proto_init() {
}
}
}
- file_json_with_int_proto_msgTypes[1].OneofWrappers = []interface{}{
+ file_json_with_int_proto_msgTypes[1].OneofWrappers = []any{
(*Value_NullValue)(nil),
(*Value_DoubleValue)(nil),
(*Value_IntegerValue)(nil),
diff --git a/qdrant/oneof_factory.go b/qdrant/oneof_factory.go
new file mode 100644
index 0000000..7dddfa1
--- /dev/null
+++ b/qdrant/oneof_factory.go
@@ -0,0 +1,830 @@
+// This file contains helper functions to create oneof gRPC fields.
+// It is very verbose to create them using the struct literal syntax.
+// So we do it for the users and export these helper functions.
+
+package qdrant
+
+import "google.golang.org/protobuf/types/known/timestamppb"
+
+// Creates a *VectorsConfig instance from *VectorParams.
+func NewVectorsConfig(params *VectorParams) *VectorsConfig {
+ return &VectorsConfig{
+ Config: &VectorsConfig_Params{
+ Params: params,
+ },
+ }
+}
+
+// Creates a *VectorsConfig instance from a map of *VectorParams.
+func NewVectorsConfigMap(paramsMap map[string]*VectorParams) *VectorsConfig {
+ return &VectorsConfig{
+ Config: &VectorsConfig_ParamsMap{
+ ParamsMap: &VectorParamsMap{
+ Map: paramsMap,
+ },
+ },
+ }
+}
+
+// Creates a *SparseVectorConfig instance from a map of *SparseVectorParams.
+func NewSparseVectorsConfig(paramsMap map[string]*SparseVectorParams) *SparseVectorConfig {
+ return &SparseVectorConfig{
+ Map: paramsMap,
+ }
+}
+
+// Creates a *VectorsConfigDiff instance from *VectorParamsDiff.
+func NewVectorsConfigDiff(params *VectorParamsDiff) *VectorsConfigDiff {
+ return &VectorsConfigDiff{
+ Config: &VectorsConfigDiff_Params{
+ Params: params,
+ },
+ }
+}
+
+// Creates a *VectorsConfigDiff instance from a map of *VectorParamsDiff.
+func NewVectorsConfigDiffMap(paramsMap map[string]*VectorParamsDiff) *VectorsConfigDiff {
+ return &VectorsConfigDiff{
+ Config: &VectorsConfigDiff_ParamsMap{
+ ParamsMap: &VectorParamsDiffMap{
+ Map: paramsMap,
+ },
+ },
+ }
+}
+
+// Creates a *QuantizationConfig instance from *ScalarQuantization.
+func NewQuantizationScalar(scalar *ScalarQuantization) *QuantizationConfig {
+ return &QuantizationConfig{
+ Quantization: &QuantizationConfig_Scalar{
+ Scalar: scalar,
+ },
+ }
+}
+
+// Creates a *QuantizationConfig instance from *BinaryQuantization.
+func NewQuantizationBinary(binary *BinaryQuantization) *QuantizationConfig {
+ return &QuantizationConfig{
+ Quantization: &QuantizationConfig_Binary{
+ Binary: binary,
+ },
+ }
+}
+
+// Creates a *QuantizationConfig instance from *ProductQuantization.
+func NewQuantizationProduct(product *ProductQuantization) *QuantizationConfig {
+ return &QuantizationConfig{
+ Quantization: &QuantizationConfig_Product{
+ Product: product,
+ },
+ }
+}
+
+// Creates a *QuantizationConfigDiff instance from *ScalarQuantization.
+func NewQuantizationDiffScalar(scalar *ScalarQuantization) *QuantizationConfigDiff {
+ return &QuantizationConfigDiff{
+ Quantization: &QuantizationConfigDiff_Scalar{
+ Scalar: scalar,
+ },
+ }
+}
+
+// Creates a *QuantizationConfigDiff instance from *BinaryQuantization.
+func NewQuantizationDiffBinary(binary *BinaryQuantization) *QuantizationConfigDiff {
+ return &QuantizationConfigDiff{
+ Quantization: &QuantizationConfigDiff_Binary{
+ Binary: binary,
+ },
+ }
+}
+
+// Creates a *QuantizationConfigDiff instance from *ProductQuantization.
+func NewQuantizationDiffProduct(product *ProductQuantization) *QuantizationConfigDiff {
+ return &QuantizationConfigDiff{
+ Quantization: &QuantizationConfigDiff_Product{
+ Product: product,
+ },
+ }
+}
+
+// Creates a *QuantizationConfigDiff instance with quantization disabled.
+func NewQuantizationDiffDisabled() *QuantizationConfigDiff {
+ return &QuantizationConfigDiff{
+ Quantization: &QuantizationConfigDiff_Disabled{
+ Disabled: &Disabled{},
+ },
+ }
+}
+
+// Creates a *PayloadIndexParams instance from *KeywordIndexParams.
+// This is an alias for NewPayloadIndexParamsKeyword().
+func NewPayloadIndexParams(params *KeywordIndexParams) *PayloadIndexParams {
+ return NewPayloadIndexParamsKeyword(params)
+}
+
+// Creates a *PayloadIndexParams instance from *KeywordIndexParams.
+func NewPayloadIndexParamsKeyword(params *KeywordIndexParams) *PayloadIndexParams {
+ return &PayloadIndexParams{
+ IndexParams: &PayloadIndexParams_KeywordIndexParams{
+ KeywordIndexParams: params,
+ },
+ }
+}
+
+// Creates a *PayloadIndexParams instance from *IntegerIndexParams.
+func NewPayloadIndexParamsInt(params *IntegerIndexParams) *PayloadIndexParams {
+ return &PayloadIndexParams{
+ IndexParams: &PayloadIndexParams_IntegerIndexParams{
+ IntegerIndexParams: params,
+ },
+ }
+}
+
+// Creates a *PayloadIndexParams instance from *FloatIndexParams.
+func NewPayloadIndexParamsFloat(params *FloatIndexParams) *PayloadIndexParams {
+ return &PayloadIndexParams{
+ IndexParams: &PayloadIndexParams_FloatIndexParams{
+ FloatIndexParams: params,
+ },
+ }
+}
+
+// Creates a *PayloadIndexParams instance from *GeoIndexParams.
+func NewPayloadIndexParamsGeo(params *GeoIndexParams) *PayloadIndexParams {
+ return &PayloadIndexParams{
+ IndexParams: &PayloadIndexParams_GeoIndexParams{
+ GeoIndexParams: params,
+ },
+ }
+}
+
+// Creates a *PayloadIndexParams instance from *TextIndexParams.
+func NewPayloadIndexParamsText(params *TextIndexParams) *PayloadIndexParams {
+ return &PayloadIndexParams{
+ IndexParams: &PayloadIndexParams_TextIndexParams{
+ TextIndexParams: params,
+ },
+ }
+}
+
+// Creates a *PayloadIndexParams instance from *BoolIndexParams.
+func NewPayloadIndexParamsBool(params *BoolIndexParams) *PayloadIndexParams {
+ return &PayloadIndexParams{
+ IndexParams: &PayloadIndexParams_BoolIndexParams{
+ BoolIndexParams: params,
+ },
+ }
+}
+
+// Creates a *PayloadIndexParams instance from *DatetimeIndexParams.
+func NewPayloadIndexParamsDatetime(params *DatetimeIndexParams) *PayloadIndexParams {
+ return &PayloadIndexParams{
+ IndexParams: &PayloadIndexParams_DatetimeIndexParams{
+ DatetimeIndexParams: params,
+ },
+ }
+}
+
+// Creates a *PayloadIndexParams instance from *UuidIndexParams.
+func NewPayloadIndexParamsUUID(params *UuidIndexParams) *PayloadIndexParams {
+ return &PayloadIndexParams{
+ IndexParams: &PayloadIndexParams_UuidIndexParams{
+ UuidIndexParams: params,
+ },
+ }
+}
+
+// Creates an *AliasOperations instance to create an alias.
+// This is an alias for NewAliasCreate().
+func NewAlias(aliasName, collectionName string) *AliasOperations {
+ return NewAliasCreate(aliasName, collectionName)
+}
+
+// Creates an *AliasOperations instance to create an alias.
+func NewAliasCreate(aliasName, collectionName string) *AliasOperations {
+ return &AliasOperations{
+ Action: &AliasOperations_CreateAlias{
+ CreateAlias: &CreateAlias{
+ AliasName: aliasName,
+ CollectionName: collectionName,
+ },
+ },
+ }
+}
+
+// Creates an *AliasOperations instance to rename an alias.
+func NewAliasRename(oldAliasName, newAliasName string) *AliasOperations {
+ return &AliasOperations{
+ Action: &AliasOperations_RenameAlias{
+ RenameAlias: &RenameAlias{
+ OldAliasName: oldAliasName,
+ NewAliasName: newAliasName,
+ },
+ },
+ }
+}
+
+// Creates an *AliasOperations instance to delete an alias.
+func NewAliasDelete(aliasName string) *AliasOperations {
+ return &AliasOperations{
+ Action: &AliasOperations_DeleteAlias{
+ DeleteAlias: &DeleteAlias{
+ AliasName: aliasName,
+ },
+ },
+ }
+}
+
+// Creates a *ShardKey instance from a string.
+// This is an alias for NewShardKeyKeyword().
+func NewShardKey(key string) *ShardKey {
+ return NewShardKeyKeyword(key)
+}
+
+// Creates a *ShardKey instance from a string.
+func NewShardKeyKeyword(key string) *ShardKey {
+ return &ShardKey{
+ Key: &ShardKey_Keyword{
+ Keyword: key,
+ },
+ }
+}
+
+// Creates a *ShardKey instance from a uint64.
+func NewShardKeyNum(key uint64) *ShardKey {
+ return &ShardKey{
+ Key: &ShardKey_Number{
+ Number: key,
+ },
+ }
+}
+
+// Creates a *ReadConsistency instance from a ReadConsistencyType.
+// This is an alias for NewReadConsistencyType().
+func NewReadConsistency(readConsistencyType ReadConsistencyType) *ReadConsistency {
+ return NewReadConsistencyType(readConsistencyType)
+}
+
+// Creates a *ReadConsistency instance from a ReadConsistencyType.
+func NewReadConsistencyType(readConsistencyType ReadConsistencyType) *ReadConsistency {
+ return &ReadConsistency{
+ Value: &ReadConsistency_Type{
+ Type: readConsistencyType,
+ },
+ }
+}
+
+// Creates a *ReadConsistency instance from a factor in uint64.
+func NewReadConsistencyFactor(readConsistencyFactor uint64) *ReadConsistency {
+ return &ReadConsistency{
+ Value: &ReadConsistency_Factor{
+ Factor: readConsistencyFactor,
+ },
+ }
+}
+
+// Creates a *PointId instance from a UUID string.
+// This is an alias for NewIDUUID().
+func NewID(uuid string) *PointId {
+	return NewIDUUID(uuid)
+}
+
+// Creates a *PointId instance from a UUID string.
+func NewIDUUID(uuid string) *PointId {
+ return &PointId{
+ PointIdOptions: &PointId_Uuid{
+ Uuid: uuid,
+ },
+ }
+}
+
+// Creates a *PointId instance from a positive integer.
+func NewIDNum(num uint64) *PointId {
+ return &PointId{
+ PointIdOptions: &PointId_Num{
+ Num: num,
+ },
+ }
+}
+
+// Creates a *VectorInput instance for dense vectors.
+// This is an alias for NewVectorInputDense().
+func NewVectorInput(values ...float32) *VectorInput {
+ return NewVectorInputDense(values)
+}
+
+// Creates a *VectorInput instance from a *PointId.
+func NewVectorInputID(id *PointId) *VectorInput {
+ return &VectorInput{
+ Variant: &VectorInput_Id{
+ Id: id,
+ },
+ }
+}
+
+// Creates a *VectorInput instance for dense vectors.
+func NewVectorInputDense(vector []float32) *VectorInput {
+ return &VectorInput{
+ Variant: &VectorInput_Dense{
+ Dense: &DenseVector{
+ Data: vector,
+ },
+ },
+ }
+}
+
+// Creates a *VectorInput instance for sparse vectors.
+func NewVectorInputSparse(indices []uint32, values []float32) *VectorInput {
+ return &VectorInput{
+ Variant: &VectorInput_Sparse{
+ Sparse: &SparseVector{
+ Values: values,
+ Indices: indices,
+ },
+ },
+ }
+}
+
+// Creates a *VectorInput instance for multi vectors.
+func NewVectorInputMulti(vectors [][]float32) *VectorInput {
+ var multiVec []*DenseVector
+ for _, vector := range vectors {
+ multiVec = append(multiVec, &DenseVector{
+ Data: vector,
+ })
+ }
+ return &VectorInput{
+ Variant: &VectorInput_MultiDense{
+ MultiDense: &MultiDenseVector{
+ Vectors: multiVec,
+ },
+ },
+ }
+}
+
+// Creates a *WithPayloadSelector instance with payload enabled/disabled.
+// This is an alias for NewWithPayloadEnable().
+func NewWithPayload(enable bool) *WithPayloadSelector {
+ return NewWithPayloadEnable(enable)
+}
+
+// Creates a *WithPayloadSelector instance with payload enabled/disabled.
+func NewWithPayloadEnable(enable bool) *WithPayloadSelector {
+ return &WithPayloadSelector{
+ SelectorOptions: &WithPayloadSelector_Enable{
+ Enable: enable,
+ },
+ }
+}
+
+// Creates a *WithPayloadSelector instance with payload fields included.
+func NewWithPayloadInclude(include ...string) *WithPayloadSelector {
+ return &WithPayloadSelector{
+ SelectorOptions: &WithPayloadSelector_Include{
+ Include: &PayloadIncludeSelector{
+ Fields: include,
+ },
+ },
+ }
+}
+
+// Creates a *WithPayloadSelector instance with payload fields excluded.
+func NewWithPayloadExclude(exclude ...string) *WithPayloadSelector {
+ return &WithPayloadSelector{
+ SelectorOptions: &WithPayloadSelector_Exclude{
+ Exclude: &PayloadExcludeSelector{
+ Fields: exclude,
+ },
+ },
+ }
+}
+
+// Creates a *WithVectorsSelector instance with vectors enabled/disabled.
+// This is an alias for NewWithVectorsEnable().
+func NewWithVectors(enable bool) *WithVectorsSelector {
+ return NewWithVectorsEnable(enable)
+}
+
+// Creates a *WithVectorsSelector instance with vectors enabled/disabled.
+func NewWithVectorsEnable(enable bool) *WithVectorsSelector {
+ return &WithVectorsSelector{
+ SelectorOptions: &WithVectorsSelector_Enable{
+ Enable: enable,
+ },
+ }
+}
+
+// Creates a *WithVectorsSelector instance with vectors included.
+func NewWithVectorsInclude(names ...string) *WithVectorsSelector {
+ return &WithVectorsSelector{
+ SelectorOptions: &WithVectorsSelector_Include{
+ Include: &VectorsSelector{
+ Names: names,
+ },
+ },
+ }
+}
+
+// Creates a *Vectors instance for dense vectors.
+// This is an alias for NewVectorsDense().
+func NewVectors(values ...float32) *Vectors {
+ return NewVectorsDense(values)
+}
+
+// Creates a *Vectors instance for dense vectors.
+func NewVectorsDense(vector []float32) *Vectors {
+ return &Vectors{
+ VectorsOptions: &Vectors_Vector{
+ Vector: NewVectorDense(vector),
+ },
+ }
+}
+
+// Creates a *Vectors instance for sparse vectors.
+func NewVectorsSparse(indices []uint32, values []float32) *Vectors {
+ return &Vectors{
+ VectorsOptions: &Vectors_Vector{
+ Vector: NewVectorSparse(indices, values),
+ },
+ }
+}
+
+// Creates a *Vectors instance for multi vectors.
+func NewVectorsMulti(vectors [][]float32) *Vectors {
+ return &Vectors{
+ VectorsOptions: &Vectors_Vector{
+ Vector: NewVectorMulti(vectors),
+ },
+ }
+}
+
+// Creates a *Vectors instance for a map of named *Vector.
+func NewVectorsMap(vectors map[string]*Vector) *Vectors {
+ return &Vectors{
+ VectorsOptions: &Vectors_Vectors{
+ Vectors: &NamedVectors{
+ Vectors: vectors,
+ },
+ },
+ }
+}
+
+// Creates a *Vector instance for dense vectors.
+// This is an alias for NewVectorDense().
+func NewVector(values ...float32) *Vector {
+ return NewVectorDense(values)
+}
+
+// Creates a *Vector instance for dense vectors.
+func NewVectorDense(vector []float32) *Vector {
+ return &Vector{
+ Data: vector,
+ }
+}
+
+// Creates a *Vector instance for sparse vectors.
+func NewVectorSparse(indices []uint32, values []float32) *Vector {
+ return &Vector{
+ Data: values,
+ Indices: &SparseIndices{
+ Data: indices,
+ },
+ }
+}
+
+// Creates a *Vector instance for multi vectors.
+func NewVectorMulti(vectors [][]float32) *Vector {
+ vectorsCount := uint32(len(vectors))
+ var flattenedVec []float32
+ for _, vector := range vectors {
+ flattenedVec = append(flattenedVec, vector...)
+ }
+ return &Vector{
+ Data: flattenedVec,
+ VectorsCount: &vectorsCount,
+ }
+}
+
+// Creates a *StartFrom instance for a float value.
+func NewStartFromFloat(value float64) *StartFrom {
+ return &StartFrom{
+ Value: &StartFrom_Float{
+ Float: value,
+ },
+ }
+}
+
+// Creates a *StartFrom instance for an integer value.
+func NewStartFromInt(value int64) *StartFrom {
+ return &StartFrom{
+ Value: &StartFrom_Integer{
+ Integer: value,
+ },
+ }
+}
+
+// Creates a *StartFrom instance for a timestamp value.
+// Parameters:
+// seconds: Represents seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z.
+// Must be from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59Z inclusive.
+// nanos: Non-negative fractions of a second at nanosecond resolution.
+// Negative second values with fractions must still have non-negative
+// nanos values that count forward in time.
+// Must be from 0 to 999,999,999 inclusive.
+func NewStartFromTimestamp(seconds int64, nanos int32) *StartFrom {
+ return &StartFrom{
+ Value: &StartFrom_Timestamp{
+ Timestamp: ×tamppb.Timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ },
+ },
+ }
+}
+
+// Creates a *StartFrom instance for a datetime string in the RFC3339 format.
+func NewStartFromDatetime(value string) *StartFrom {
+ return &StartFrom{
+ Value: &StartFrom_Datetime{
+ Datetime: value,
+ },
+ }
+}
+
+// Creates a *TargetVector instance from a *Vector.
+// This is an alias for NewTargetVector().
+func NewTarget(vector *Vector) *TargetVector {
+ return NewTargetVector(vector)
+}
+
+// Creates a *TargetVector instance from a *Vector.
+func NewTargetVector(vector *Vector) *TargetVector {
+ return &TargetVector{
+ Target: &TargetVector_Single{
+ Single: &VectorExample{
+ Example: &VectorExample_Vector{
+ Vector: vector,
+ },
+ },
+ },
+ }
+}
+
+// Creates a *TargetVector instance from a *PointId.
+func NewTargetID(id *PointId) *TargetVector {
+ return &TargetVector{
+ Target: &TargetVector_Single{
+ Single: &VectorExample{
+ Example: &VectorExample_Id{
+ Id: id,
+ },
+ },
+ },
+ }
+}
+
+// Creates a *Query instance for a nearest query from *VectorInput.
+func NewQueryNearest(nearest *VectorInput) *Query {
+ return &Query{
+ Variant: &Query_Nearest{
+ Nearest: nearest,
+ },
+ }
+}
+
+// Creates a *Query instance for a nearest query from dense vectors.
+// This is an alias for NewQueryDense().
+func NewQuery(values ...float32) *Query {
+ return NewQueryDense(values)
+}
+
+// Creates a *Query instance for a nearest query from dense vectors.
+func NewQueryDense(vector []float32) *Query {
+ return NewQueryNearest(NewVectorInputDense(vector))
+}
+
+// Creates a *Query instance for a nearest query from sparse vectors.
+func NewQuerySparse(indices []uint32, values []float32) *Query {
+ return NewQueryNearest(NewVectorInputSparse(indices, values))
+}
+
+// Creates a *Query instance for a nearest query from multi vectors.
+func NewQueryMulti(vectors [][]float32) *Query {
+ return NewQueryNearest(NewVectorInputMulti(vectors))
+}
+
+// Creates a *Query instance for a nearest query from *PointId.
+func NewQueryID(id *PointId) *Query {
+ return NewQueryNearest(NewVectorInputID(id))
+}
+
+// Creates a *Query instance for recommend query from *RecommendInput.
+func NewQueryRecommend(recommend *RecommendInput) *Query {
+ return &Query{
+ Variant: &Query_Recommend{
+ Recommend: recommend,
+ },
+ }
+}
+
+// Creates a *Query instance for a discover query from *DiscoverInput.
+func NewQueryDiscover(discover *DiscoverInput) *Query {
+ return &Query{
+ Variant: &Query_Discover{
+ Discover: discover,
+ },
+ }
+}
+
+// Creates a *Query instance for a context query from *ContextInput.
+func NewQueryContext(context *ContextInput) *Query {
+ return &Query{
+ Variant: &Query_Context{
+ Context: context,
+ },
+ }
+}
+
+// Creates a *Query instance for ordering points with *OrderBy.
+func NewQueryOrderBy(orderBy *OrderBy) *Query {
+ return &Query{
+ Variant: &Query_OrderBy{
+ OrderBy: orderBy,
+ },
+ }
+}
+
+// Creates a *Query instance for combining prefetch results with Fusion.
+func NewQueryFusion(fusion Fusion) *Query {
+	return &Query{
+		Variant: &Query_Fusion{
+			Fusion: fusion,
+		},
+	}
+}
+
+// Creates a *Query instance for sampling points.
+func NewQuerySample(sample Sample) *Query {
+ return &Query{
+ Variant: &Query_Sample{
+ Sample: sample,
+ },
+ }
+}
+
+// Creates a *FacetValue instance from a string.
+func NewFacetValue(value string) *FacetValue {
+ return &FacetValue{
+ Variant: &FacetValue_StringValue{
+ StringValue: value,
+ },
+ }
+}
+
+// Creates a *PointsUpdateOperation instance for upserting points.
+func NewPointsUpdateUpsert(upsert *PointsUpdateOperation_PointStructList) *PointsUpdateOperation {
+ return &PointsUpdateOperation{
+ Operation: &PointsUpdateOperation_Upsert{
+ Upsert: upsert,
+ },
+ }
+}
+
+// Creates a *PointsUpdateOperation instance for setting payload.
+func NewPointsUpdateSetPayload(setPayload *PointsUpdateOperation_SetPayload) *PointsUpdateOperation {
+ return &PointsUpdateOperation{
+ Operation: &PointsUpdateOperation_SetPayload_{
+ SetPayload: setPayload,
+ },
+ }
+}
+
+// Creates a *PointsUpdateOperation instance for overwriting payload.
+func NewPointsUpdateOverwritePayload(overwritePayload *PointsUpdateOperation_OverwritePayload) *PointsUpdateOperation {
+ return &PointsUpdateOperation{
+ Operation: &PointsUpdateOperation_OverwritePayload_{
+ OverwritePayload: overwritePayload,
+ },
+ }
+}
+
+// Creates a *PointsUpdateOperation instance for deleting payload fields.
+func NewPointsUpdateDeletePayload(deletePayload *PointsUpdateOperation_DeletePayload) *PointsUpdateOperation {
+ return &PointsUpdateOperation{
+ Operation: &PointsUpdateOperation_DeletePayload_{
+ DeletePayload: deletePayload,
+ },
+ }
+}
+
+// Creates a *PointsUpdateOperation instance for updating vectors.
+func NewPointsUpdateUpdateVectors(updateVectors *PointsUpdateOperation_UpdateVectors) *PointsUpdateOperation {
+ return &PointsUpdateOperation{
+ Operation: &PointsUpdateOperation_UpdateVectors_{
+ UpdateVectors: updateVectors,
+ },
+ }
+}
+
+// Creates a *PointsUpdateOperation instance for deleting vectors.
+func NewPointsUpdateDeleteVectors(deleteVectors *PointsUpdateOperation_DeleteVectors) *PointsUpdateOperation {
+ return &PointsUpdateOperation{
+ Operation: &PointsUpdateOperation_DeleteVectors_{
+ DeleteVectors: deleteVectors,
+ },
+ }
+}
+
+// Creates a *PointsUpdateOperation instance for deleting points.
+func NewPointsUpdateDeletePoints(deletePoints *PointsUpdateOperation_DeletePoints) *PointsUpdateOperation {
+ return &PointsUpdateOperation{
+ Operation: &PointsUpdateOperation_DeletePoints_{
+ DeletePoints: deletePoints,
+ },
+ }
+}
+
+// Creates a *PointsUpdateOperation instance for clearing payload.
+func NewPointsUpdateClearPayload(clearPayload *PointsUpdateOperation_ClearPayload) *PointsUpdateOperation {
+ return &PointsUpdateOperation{
+ Operation: &PointsUpdateOperation_ClearPayload_{
+ ClearPayload: clearPayload,
+ },
+ }
+}
+
+// Creates a *OrderValue instance from an integer.
+func NewOrderValueInt(value int64) *OrderValue {
+ return &OrderValue{
+ Variant: &OrderValue_Int{
+ Int: value,
+ },
+ }
+}
+
+// Creates a *OrderValue instance from a float.
+func NewOrderValueFloat(value float64) *OrderValue {
+ return &OrderValue{
+ Variant: &OrderValue_Float{
+ Float: value,
+ },
+ }
+}
+
+// Creates a *GroupId instance from an unsigned integer.
+func NewGroupIDUnsigned(value uint64) *GroupId {
+	return &GroupId{
+		Kind: &GroupId_UnsignedValue{
+			UnsignedValue: value,
+		},
+	}
+}
+
+// Creates a *GroupId instance from an integer.
+func NewGroupIDInt(value int64) *GroupId {
+ return &GroupId{
+ Kind: &GroupId_IntegerValue{
+ IntegerValue: value,
+ },
+ }
+}
+
+// Creates a *GroupId instance from a string.
+func NewGroupIDString(value string) *GroupId {
+ return &GroupId{
+ Kind: &GroupId_StringValue{
+ StringValue: value,
+ },
+ }
+}
+
+// Creates a *PointsSelector instance for selecting points by IDs.
+// This is an alias for NewPointsSelectorIDs().
+func NewPointsSelector(ids ...*PointId) *PointsSelector {
+ return NewPointsSelectorIDs(ids)
+}
+
+// Creates a *PointsSelector instance for selecting points by filter.
+func NewPointsSelectorFilter(filter *Filter) *PointsSelector {
+ return &PointsSelector{
+ PointsSelectorOneOf: &PointsSelector_Filter{
+ Filter: filter,
+ },
+ }
+}
+
+// Creates a *PointsSelector instance for selecting points by IDs.
+func NewPointsSelectorIDs(ids []*PointId) *PointsSelector {
+ return &PointsSelector{
+ PointsSelectorOneOf: &PointsSelector_Points{
+ Points: &PointsIdsList{
+ Ids: ids,
+ },
+ },
+ }
+}
+
+// Creates a pointer to a value of any type.
+func PtrOf[T any](t T) *T {
+ return &t
+}
diff --git a/qdrant/points.go b/qdrant/points.go
new file mode 100644
index 0000000..683c2c0
--- /dev/null
+++ b/qdrant/points.go
@@ -0,0 +1,302 @@
+package qdrant
+
+import (
+ "context"
+)
+
+// Performs insert + updates on points. If a point with a given ID already exists, it will be overwritten.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The UpsertPoints request containing the points to upsert.
+//
+// Returns:
+// - *UpdateResult: The result of the upsert operation.
+// - error: An error if the operation fails.
+func (c *Client) Upsert(ctx context.Context, request *UpsertPoints) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().Upsert(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "Upsert", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Removes points from a collection by IDs or payload filters.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The DeletePoints request specifying which points to delete.
+//
+// Returns:
+// - *UpdateResult: The result of the delete operation.
+// - error: An error if the operation fails.
+func (c *Client) Delete(ctx context.Context, request *DeletePoints) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().Delete(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "Delete", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Retrieves points from a collection by IDs.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The GetPoints request specifying which points to retrieve.
+//
+// Returns:
+// - []*RetrievedPoint: A slice of retrieved points.
+// - error: An error if the operation fails.
+func (c *Client) Get(ctx context.Context, request *GetPoints) ([]*RetrievedPoint, error) {
+ resp, err := c.GetPointsClient().Get(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "Get", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Iterates over all or filtered points in a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The ScrollPoints request specifying the scroll parameters.
+//
+// Returns:
+// - []*RetrievedPoint: A slice of retrieved points.
+// - error: An error if the operation fails.
+func (c *Client) Scroll(ctx context.Context, request *ScrollPoints) ([]*RetrievedPoint, error) {
+ resp, err := c.GetPointsClient().Scroll(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "Scroll", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Updates vectors for points in a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The UpdatePointVectors request containing the vectors to update.
+//
+// Returns:
+// - *UpdateResult: The result of the update operation.
+// - error: An error if the operation fails.
+func (c *Client) UpdateVectors(ctx context.Context, request *UpdatePointVectors) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().UpdateVectors(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "UpdateVectors", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Removes vectors from points in a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The DeletePointVectors request specifying which vectors to delete.
+//
+// Returns:
+// - *UpdateResult: The result of the delete operation.
+// - error: An error if the operation fails.
+func (c *Client) DeleteVectors(ctx context.Context, request *DeletePointVectors) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().DeleteVectors(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "DeleteVectors", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Sets payload fields for points in a collection.
+// Can be used to add new payload fields or update existing ones.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The SetPayloadPoints request containing the payload to set.
+//
+// Returns:
+// - *UpdateResult: The result of the set operation.
+// - error: An error if the operation fails.
+func (c *Client) SetPayload(ctx context.Context, request *SetPayloadPoints) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().SetPayload(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "SetPayload", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Overwrites the entire payload for points in a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The SetPayloadPoints request containing the payload to overwrite.
+//
+// Returns:
+// - *UpdateResult: The result of the overwrite operation.
+// - error: An error if the operation fails.
+func (c *Client) OverwritePayload(ctx context.Context, request *SetPayloadPoints) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().OverwritePayload(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "OverwritePayload", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Removes payload fields from points in a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The DeletePayloadPoints request specifying which payload fields to delete.
+//
+// Returns:
+// - *UpdateResult: The result of the delete operation.
+// - error: An error if the operation fails.
+func (c *Client) DeletePayload(ctx context.Context, request *DeletePayloadPoints) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().DeletePayload(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "DeletePayload", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Removes all payload fields from points in a collection.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The ClearPayloadPoints request specifying which points to clear.
+//
+// Returns:
+// - *UpdateResult: The result of the clear operation.
+// - error: An error if the operation fails.
+func (c *Client) ClearPayload(ctx context.Context, request *ClearPayloadPoints) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().ClearPayload(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "ClearPayload", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Creates an index for a payload field.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The CreateFieldIndexCollection request specifying the field to index.
+//
+// Returns:
+// - *UpdateResult: The result of the index creation operation.
+// - error: An error if the operation fails.
+func (c *Client) CreateFieldIndex(ctx context.Context, request *CreateFieldIndexCollection) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().CreateFieldIndex(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "CreateFieldIndex", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Removes an index for a payload field.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The DeleteFieldIndexCollection request specifying the field index to delete.
+//
+// Returns:
+// - *UpdateResult: The result of the index deletion operation.
+// - error: An error if the operation fails.
+func (c *Client) DeleteFieldIndex(ctx context.Context, request *DeleteFieldIndexCollection) (*UpdateResult, error) {
+ resp, err := c.GetPointsClient().DeleteFieldIndex(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "DeleteFieldIndex", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Returns the number of points in a collection with given filtering conditions.
+// Gets the total count if no filter is provided.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The CountPoints request containing optional filtering conditions.
+//
+// Returns:
+// - uint64: The count of points matching the conditions.
+// - error: An error if the operation fails.
+func (c *Client) Count(ctx context.Context, request *CountPoints) (uint64, error) {
+ resp, err := c.GetPointsClient().Count(ctx, request)
+ if err != nil {
+ return 0, newQdrantErr(err, "Count", request.GetCollectionName())
+ }
+ return resp.GetResult().GetCount(), nil
+}
+
+// Performs multiple update operations in one request.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The UpdateBatchPoints request containing multiple update operations.
+//
+// Returns:
+// - []*UpdateResult: A slice of results for each update operation.
+// - error: An error if the operation fails.
+func (c *Client) UpdateBatch(ctx context.Context, request *UpdateBatchPoints) ([]*UpdateResult, error) {
+ resp, err := c.GetPointsClient().UpdateBatch(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "UpdateBatch", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Performs a universal query on points.
+// Covers all capabilities of search, recommend, discover, filters.
+// Also enables hybrid and multi-stage queries.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The QueryPoints request containing the query parameters.
+//
+// Returns:
+// - []*ScoredPoint: A slice of scored points matching the query.
+// - error: An error if the operation fails.
+func (c *Client) Query(ctx context.Context, request *QueryPoints) ([]*ScoredPoint, error) {
+ resp, err := c.GetPointsClient().Query(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "Query", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Performs multiple universal queries on points in a batch.
+// Covers all capabilities of search, recommend, discover, filters.
+// Also enables hybrid and multi-stage queries.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The QueryBatchPoints request containing multiple query parameters.
+//
+// Returns:
+// - []*BatchResult: A slice of batch results for each query.
+// - error: An error if the operation fails.
+func (c *Client) QueryBatch(ctx context.Context, request *QueryBatchPoints) ([]*BatchResult, error) {
+ resp, err := c.GetPointsClient().QueryBatch(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "QueryBatch", request.GetCollectionName())
+ }
+ return resp.GetResult(), nil
+}
+
+// Performs a universal query on points grouped by a payload field.
+// Covers all capabilities of search, recommend, discover, filters.
+// Also enables hybrid and multi-stage queries.
+//
+// Parameters:
+// - ctx: The context for the request.
+// - request: The QueryPointGroups request containing the query parameters.
+//
+// Returns:
+// - []*PointGroup: A slice of point groups matching the query.
+// - error: An error if the operation fails.
+func (c *Client) QueryGroups(ctx context.Context, request *QueryPointGroups) ([]*PointGroup, error) {
+ resp, err := c.GetPointsClient().QueryGroups(ctx, request)
+ if err != nil {
+ return nil, newQdrantErr(err, "QueryGroups", request.GetCollectionName())
+ }
+ return resp.GetResult().GetGroups(), nil
+}
diff --git a/qdrant/points.pb.go b/qdrant/points.pb.go
index 5da94a5..26cafe4 100644
--- a/qdrant/points.pb.go
+++ b/qdrant/points.pb.go
@@ -1,10 +1,10 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v4.22.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.1
// source: points.proto
-package go_client
+package qdrant
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -5379,7 +5379,7 @@ func (x *PointsUpdateOperation) GetUpsert() *PointsUpdateOperation_PointStructLi
return nil
}
-// Deprecated: Do not use.
+// Deprecated: Marked as deprecated in points.proto.
func (x *PointsUpdateOperation) GetDeleteDeprecated() *PointsSelector {
if x, ok := x.GetOperation().(*PointsUpdateOperation_DeleteDeprecated); ok {
return x.DeleteDeprecated
@@ -5408,7 +5408,7 @@ func (x *PointsUpdateOperation) GetDeletePayload() *PointsUpdateOperation_Delete
return nil
}
-// Deprecated: Do not use.
+// Deprecated: Marked as deprecated in points.proto.
func (x *PointsUpdateOperation) GetClearPayloadDeprecated() *PointsSelector {
if x, ok := x.GetOperation().(*PointsUpdateOperation_ClearPayloadDeprecated); ok {
return x.ClearPayloadDeprecated
@@ -5453,7 +5453,7 @@ type PointsUpdateOperation_Upsert struct {
}
type PointsUpdateOperation_DeleteDeprecated struct {
- // Deprecated: Do not use.
+ // Deprecated: Marked as deprecated in points.proto.
DeleteDeprecated *PointsSelector `protobuf:"bytes,2,opt,name=delete_deprecated,json=deleteDeprecated,proto3,oneof"`
}
@@ -5470,7 +5470,7 @@ type PointsUpdateOperation_DeletePayload_ struct {
}
type PointsUpdateOperation_ClearPayloadDeprecated struct {
- // Deprecated: Do not use.
+ // Deprecated: Marked as deprecated in points.proto.
ClearPayloadDeprecated *PointsSelector `protobuf:"bytes,6,opt,name=clear_payload_deprecated,json=clearPayloadDeprecated,proto3,oneof"`
}
@@ -10753,7 +10753,7 @@ func file_points_proto_rawDescGZIP() []byte {
var file_points_proto_enumTypes = make([]protoimpl.EnumInfo, 8)
var file_points_proto_msgTypes = make([]protoimpl.MessageInfo, 122)
-var file_points_proto_goTypes = []interface{}{
+var file_points_proto_goTypes = []any{
(WriteOrderingType)(0), // 0: qdrant.WriteOrderingType
(ReadConsistencyType)(0), // 1: qdrant.ReadConsistencyType
(FieldType)(0), // 2: qdrant.FieldType
@@ -11184,7 +11184,7 @@ func file_points_proto_init() {
file_collections_proto_init()
file_json_with_int_proto_init()
if !protoimpl.UnsafeEnabled {
- file_points_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*WriteOrdering); i {
case 0:
return &v.state
@@ -11196,7 +11196,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ReadConsistency); i {
case 0:
return &v.state
@@ -11208,7 +11208,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*PointId); i {
case 0:
return &v.state
@@ -11220,7 +11220,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*SparseIndices); i {
case 0:
return &v.state
@@ -11232,7 +11232,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*Vector); i {
case 0:
return &v.state
@@ -11244,7 +11244,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*DenseVector); i {
case 0:
return &v.state
@@ -11256,7 +11256,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*SparseVector); i {
case 0:
return &v.state
@@ -11268,7 +11268,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*MultiDenseVector); i {
case 0:
return &v.state
@@ -11280,7 +11280,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*VectorInput); i {
case 0:
return &v.state
@@ -11292,7 +11292,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*ShardKeySelector); i {
case 0:
return &v.state
@@ -11304,7 +11304,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[10].Exporter = func(v any, i int) any {
switch v := v.(*UpsertPoints); i {
case 0:
return &v.state
@@ -11316,7 +11316,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[11].Exporter = func(v any, i int) any {
switch v := v.(*DeletePoints); i {
case 0:
return &v.state
@@ -11328,7 +11328,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[12].Exporter = func(v any, i int) any {
switch v := v.(*GetPoints); i {
case 0:
return &v.state
@@ -11340,7 +11340,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[13].Exporter = func(v any, i int) any {
switch v := v.(*UpdatePointVectors); i {
case 0:
return &v.state
@@ -11352,7 +11352,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[14].Exporter = func(v any, i int) any {
switch v := v.(*PointVectors); i {
case 0:
return &v.state
@@ -11364,7 +11364,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[15].Exporter = func(v any, i int) any {
switch v := v.(*DeletePointVectors); i {
case 0:
return &v.state
@@ -11376,7 +11376,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[16].Exporter = func(v any, i int) any {
switch v := v.(*SetPayloadPoints); i {
case 0:
return &v.state
@@ -11388,7 +11388,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[17].Exporter = func(v any, i int) any {
switch v := v.(*DeletePayloadPoints); i {
case 0:
return &v.state
@@ -11400,7 +11400,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[18].Exporter = func(v any, i int) any {
switch v := v.(*ClearPayloadPoints); i {
case 0:
return &v.state
@@ -11412,7 +11412,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[19].Exporter = func(v any, i int) any {
switch v := v.(*CreateFieldIndexCollection); i {
case 0:
return &v.state
@@ -11424,7 +11424,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[20].Exporter = func(v any, i int) any {
switch v := v.(*DeleteFieldIndexCollection); i {
case 0:
return &v.state
@@ -11436,7 +11436,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[21].Exporter = func(v any, i int) any {
switch v := v.(*PayloadIncludeSelector); i {
case 0:
return &v.state
@@ -11448,7 +11448,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[22].Exporter = func(v any, i int) any {
switch v := v.(*PayloadExcludeSelector); i {
case 0:
return &v.state
@@ -11460,7 +11460,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[23].Exporter = func(v any, i int) any {
switch v := v.(*WithPayloadSelector); i {
case 0:
return &v.state
@@ -11472,7 +11472,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[24].Exporter = func(v any, i int) any {
switch v := v.(*NamedVectors); i {
case 0:
return &v.state
@@ -11484,7 +11484,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[25].Exporter = func(v any, i int) any {
switch v := v.(*Vectors); i {
case 0:
return &v.state
@@ -11496,7 +11496,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[26].Exporter = func(v any, i int) any {
switch v := v.(*VectorsSelector); i {
case 0:
return &v.state
@@ -11508,7 +11508,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[27].Exporter = func(v any, i int) any {
switch v := v.(*WithVectorsSelector); i {
case 0:
return &v.state
@@ -11520,7 +11520,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[28].Exporter = func(v any, i int) any {
switch v := v.(*QuantizationSearchParams); i {
case 0:
return &v.state
@@ -11532,7 +11532,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[29].Exporter = func(v any, i int) any {
switch v := v.(*SearchParams); i {
case 0:
return &v.state
@@ -11544,7 +11544,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[30].Exporter = func(v any, i int) any {
switch v := v.(*SearchPoints); i {
case 0:
return &v.state
@@ -11556,7 +11556,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[31].Exporter = func(v any, i int) any {
switch v := v.(*SearchBatchPoints); i {
case 0:
return &v.state
@@ -11568,7 +11568,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[32].Exporter = func(v any, i int) any {
switch v := v.(*WithLookup); i {
case 0:
return &v.state
@@ -11580,7 +11580,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[33].Exporter = func(v any, i int) any {
switch v := v.(*SearchPointGroups); i {
case 0:
return &v.state
@@ -11592,7 +11592,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[34].Exporter = func(v any, i int) any {
switch v := v.(*StartFrom); i {
case 0:
return &v.state
@@ -11604,7 +11604,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[35].Exporter = func(v any, i int) any {
switch v := v.(*OrderBy); i {
case 0:
return &v.state
@@ -11616,7 +11616,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[36].Exporter = func(v any, i int) any {
switch v := v.(*ScrollPoints); i {
case 0:
return &v.state
@@ -11628,7 +11628,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[37].Exporter = func(v any, i int) any {
switch v := v.(*LookupLocation); i {
case 0:
return &v.state
@@ -11640,7 +11640,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[38].Exporter = func(v any, i int) any {
switch v := v.(*RecommendPoints); i {
case 0:
return &v.state
@@ -11652,7 +11652,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[39].Exporter = func(v any, i int) any {
switch v := v.(*RecommendBatchPoints); i {
case 0:
return &v.state
@@ -11664,7 +11664,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[40].Exporter = func(v any, i int) any {
switch v := v.(*RecommendPointGroups); i {
case 0:
return &v.state
@@ -11676,7 +11676,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[41].Exporter = func(v any, i int) any {
switch v := v.(*TargetVector); i {
case 0:
return &v.state
@@ -11688,7 +11688,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[42].Exporter = func(v any, i int) any {
switch v := v.(*VectorExample); i {
case 0:
return &v.state
@@ -11700,7 +11700,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[43].Exporter = func(v any, i int) any {
switch v := v.(*ContextExamplePair); i {
case 0:
return &v.state
@@ -11712,7 +11712,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[44].Exporter = func(v any, i int) any {
switch v := v.(*DiscoverPoints); i {
case 0:
return &v.state
@@ -11724,7 +11724,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[45].Exporter = func(v any, i int) any {
switch v := v.(*DiscoverBatchPoints); i {
case 0:
return &v.state
@@ -11736,7 +11736,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[46].Exporter = func(v any, i int) any {
switch v := v.(*CountPoints); i {
case 0:
return &v.state
@@ -11748,7 +11748,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[47].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[47].Exporter = func(v any, i int) any {
switch v := v.(*RecommendInput); i {
case 0:
return &v.state
@@ -11760,7 +11760,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[48].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[48].Exporter = func(v any, i int) any {
switch v := v.(*ContextInputPair); i {
case 0:
return &v.state
@@ -11772,7 +11772,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[49].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[49].Exporter = func(v any, i int) any {
switch v := v.(*DiscoverInput); i {
case 0:
return &v.state
@@ -11784,7 +11784,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[50].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[50].Exporter = func(v any, i int) any {
switch v := v.(*ContextInput); i {
case 0:
return &v.state
@@ -11796,7 +11796,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[51].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[51].Exporter = func(v any, i int) any {
switch v := v.(*Query); i {
case 0:
return &v.state
@@ -11808,7 +11808,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[52].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[52].Exporter = func(v any, i int) any {
switch v := v.(*PrefetchQuery); i {
case 0:
return &v.state
@@ -11820,7 +11820,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[53].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[53].Exporter = func(v any, i int) any {
switch v := v.(*QueryPoints); i {
case 0:
return &v.state
@@ -11832,7 +11832,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[54].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[54].Exporter = func(v any, i int) any {
switch v := v.(*QueryBatchPoints); i {
case 0:
return &v.state
@@ -11844,7 +11844,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[55].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[55].Exporter = func(v any, i int) any {
switch v := v.(*QueryPointGroups); i {
case 0:
return &v.state
@@ -11856,7 +11856,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[56].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[56].Exporter = func(v any, i int) any {
switch v := v.(*FacetValue); i {
case 0:
return &v.state
@@ -11868,7 +11868,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[57].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[57].Exporter = func(v any, i int) any {
switch v := v.(*FacetValueHit); i {
case 0:
return &v.state
@@ -11880,7 +11880,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[58].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[58].Exporter = func(v any, i int) any {
switch v := v.(*PointsUpdateOperation); i {
case 0:
return &v.state
@@ -11892,7 +11892,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[59].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[59].Exporter = func(v any, i int) any {
switch v := v.(*UpdateBatchPoints); i {
case 0:
return &v.state
@@ -11904,7 +11904,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[60].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[60].Exporter = func(v any, i int) any {
switch v := v.(*PointsOperationResponse); i {
case 0:
return &v.state
@@ -11916,7 +11916,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[61].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[61].Exporter = func(v any, i int) any {
switch v := v.(*UpdateResult); i {
case 0:
return &v.state
@@ -11928,7 +11928,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[62].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[62].Exporter = func(v any, i int) any {
switch v := v.(*OrderValue); i {
case 0:
return &v.state
@@ -11940,7 +11940,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[63].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[63].Exporter = func(v any, i int) any {
switch v := v.(*ScoredPoint); i {
case 0:
return &v.state
@@ -11952,7 +11952,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[64].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[64].Exporter = func(v any, i int) any {
switch v := v.(*GroupId); i {
case 0:
return &v.state
@@ -11964,7 +11964,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[65].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[65].Exporter = func(v any, i int) any {
switch v := v.(*PointGroup); i {
case 0:
return &v.state
@@ -11976,7 +11976,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[66].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[66].Exporter = func(v any, i int) any {
switch v := v.(*GroupsResult); i {
case 0:
return &v.state
@@ -11988,7 +11988,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[67].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[67].Exporter = func(v any, i int) any {
switch v := v.(*SearchResponse); i {
case 0:
return &v.state
@@ -12000,7 +12000,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[68].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[68].Exporter = func(v any, i int) any {
switch v := v.(*QueryResponse); i {
case 0:
return &v.state
@@ -12012,7 +12012,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[69].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[69].Exporter = func(v any, i int) any {
switch v := v.(*QueryBatchResponse); i {
case 0:
return &v.state
@@ -12024,7 +12024,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[70].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[70].Exporter = func(v any, i int) any {
switch v := v.(*QueryGroupsResponse); i {
case 0:
return &v.state
@@ -12036,7 +12036,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[71].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[71].Exporter = func(v any, i int) any {
switch v := v.(*BatchResult); i {
case 0:
return &v.state
@@ -12048,7 +12048,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[72].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[72].Exporter = func(v any, i int) any {
switch v := v.(*SearchBatchResponse); i {
case 0:
return &v.state
@@ -12060,7 +12060,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[73].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[73].Exporter = func(v any, i int) any {
switch v := v.(*SearchGroupsResponse); i {
case 0:
return &v.state
@@ -12072,7 +12072,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[74].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[74].Exporter = func(v any, i int) any {
switch v := v.(*CountResponse); i {
case 0:
return &v.state
@@ -12084,7 +12084,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[75].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[75].Exporter = func(v any, i int) any {
switch v := v.(*ScrollResponse); i {
case 0:
return &v.state
@@ -12096,7 +12096,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[76].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[76].Exporter = func(v any, i int) any {
switch v := v.(*CountResult); i {
case 0:
return &v.state
@@ -12108,7 +12108,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[77].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[77].Exporter = func(v any, i int) any {
switch v := v.(*RetrievedPoint); i {
case 0:
return &v.state
@@ -12120,7 +12120,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[78].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[78].Exporter = func(v any, i int) any {
switch v := v.(*GetResponse); i {
case 0:
return &v.state
@@ -12132,7 +12132,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[79].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[79].Exporter = func(v any, i int) any {
switch v := v.(*RecommendResponse); i {
case 0:
return &v.state
@@ -12144,7 +12144,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[80].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[80].Exporter = func(v any, i int) any {
switch v := v.(*RecommendBatchResponse); i {
case 0:
return &v.state
@@ -12156,7 +12156,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[81].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[81].Exporter = func(v any, i int) any {
switch v := v.(*DiscoverResponse); i {
case 0:
return &v.state
@@ -12168,7 +12168,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[82].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[82].Exporter = func(v any, i int) any {
switch v := v.(*DiscoverBatchResponse); i {
case 0:
return &v.state
@@ -12180,7 +12180,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[83].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[83].Exporter = func(v any, i int) any {
switch v := v.(*RecommendGroupsResponse); i {
case 0:
return &v.state
@@ -12192,7 +12192,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[84].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[84].Exporter = func(v any, i int) any {
switch v := v.(*UpdateBatchResponse); i {
case 0:
return &v.state
@@ -12204,7 +12204,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[85].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[85].Exporter = func(v any, i int) any {
switch v := v.(*Filter); i {
case 0:
return &v.state
@@ -12216,7 +12216,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[86].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[86].Exporter = func(v any, i int) any {
switch v := v.(*MinShould); i {
case 0:
return &v.state
@@ -12228,7 +12228,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[87].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[87].Exporter = func(v any, i int) any {
switch v := v.(*Condition); i {
case 0:
return &v.state
@@ -12240,7 +12240,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[88].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[88].Exporter = func(v any, i int) any {
switch v := v.(*IsEmptyCondition); i {
case 0:
return &v.state
@@ -12252,7 +12252,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[89].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[89].Exporter = func(v any, i int) any {
switch v := v.(*IsNullCondition); i {
case 0:
return &v.state
@@ -12264,7 +12264,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[90].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[90].Exporter = func(v any, i int) any {
switch v := v.(*HasIdCondition); i {
case 0:
return &v.state
@@ -12276,7 +12276,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[91].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[91].Exporter = func(v any, i int) any {
switch v := v.(*NestedCondition); i {
case 0:
return &v.state
@@ -12288,7 +12288,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[92].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[92].Exporter = func(v any, i int) any {
switch v := v.(*FieldCondition); i {
case 0:
return &v.state
@@ -12300,7 +12300,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[93].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[93].Exporter = func(v any, i int) any {
switch v := v.(*Match); i {
case 0:
return &v.state
@@ -12312,7 +12312,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[94].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[94].Exporter = func(v any, i int) any {
switch v := v.(*RepeatedStrings); i {
case 0:
return &v.state
@@ -12324,7 +12324,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[95].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[95].Exporter = func(v any, i int) any {
switch v := v.(*RepeatedIntegers); i {
case 0:
return &v.state
@@ -12336,7 +12336,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[96].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[96].Exporter = func(v any, i int) any {
switch v := v.(*Range); i {
case 0:
return &v.state
@@ -12348,7 +12348,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[97].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[97].Exporter = func(v any, i int) any {
switch v := v.(*DatetimeRange); i {
case 0:
return &v.state
@@ -12360,7 +12360,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[98].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[98].Exporter = func(v any, i int) any {
switch v := v.(*GeoBoundingBox); i {
case 0:
return &v.state
@@ -12372,7 +12372,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[99].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[99].Exporter = func(v any, i int) any {
switch v := v.(*GeoRadius); i {
case 0:
return &v.state
@@ -12384,7 +12384,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[100].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[100].Exporter = func(v any, i int) any {
switch v := v.(*GeoLineString); i {
case 0:
return &v.state
@@ -12396,7 +12396,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[101].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[101].Exporter = func(v any, i int) any {
switch v := v.(*GeoPolygon); i {
case 0:
return &v.state
@@ -12408,7 +12408,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[102].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[102].Exporter = func(v any, i int) any {
switch v := v.(*ValuesCount); i {
case 0:
return &v.state
@@ -12420,7 +12420,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[103].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[103].Exporter = func(v any, i int) any {
switch v := v.(*PointsSelector); i {
case 0:
return &v.state
@@ -12432,7 +12432,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[104].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[104].Exporter = func(v any, i int) any {
switch v := v.(*PointsIdsList); i {
case 0:
return &v.state
@@ -12444,7 +12444,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[105].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[105].Exporter = func(v any, i int) any {
switch v := v.(*PointStruct); i {
case 0:
return &v.state
@@ -12456,7 +12456,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[106].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[106].Exporter = func(v any, i int) any {
switch v := v.(*GeoPoint); i {
case 0:
return &v.state
@@ -12468,7 +12468,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[109].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[109].Exporter = func(v any, i int) any {
switch v := v.(*PointsUpdateOperation_PointStructList); i {
case 0:
return &v.state
@@ -12480,7 +12480,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[110].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[110].Exporter = func(v any, i int) any {
switch v := v.(*PointsUpdateOperation_SetPayload); i {
case 0:
return &v.state
@@ -12492,7 +12492,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[111].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[111].Exporter = func(v any, i int) any {
switch v := v.(*PointsUpdateOperation_OverwritePayload); i {
case 0:
return &v.state
@@ -12504,7 +12504,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[112].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[112].Exporter = func(v any, i int) any {
switch v := v.(*PointsUpdateOperation_DeletePayload); i {
case 0:
return &v.state
@@ -12516,7 +12516,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[113].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[113].Exporter = func(v any, i int) any {
switch v := v.(*PointsUpdateOperation_UpdateVectors); i {
case 0:
return &v.state
@@ -12528,7 +12528,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[114].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[114].Exporter = func(v any, i int) any {
switch v := v.(*PointsUpdateOperation_DeleteVectors); i {
case 0:
return &v.state
@@ -12540,7 +12540,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[115].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[115].Exporter = func(v any, i int) any {
switch v := v.(*PointsUpdateOperation_DeletePoints); i {
case 0:
return &v.state
@@ -12552,7 +12552,7 @@ func file_points_proto_init() {
return nil
}
}
- file_points_proto_msgTypes[116].Exporter = func(v interface{}, i int) interface{} {
+ file_points_proto_msgTypes[116].Exporter = func(v any, i int) any {
switch v := v.(*PointsUpdateOperation_ClearPayload); i {
case 0:
return &v.state
@@ -12565,74 +12565,74 @@ func file_points_proto_init() {
}
}
}
- file_points_proto_msgTypes[1].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[1].OneofWrappers = []any{
(*ReadConsistency_Type)(nil),
(*ReadConsistency_Factor)(nil),
}
- file_points_proto_msgTypes[2].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[2].OneofWrappers = []any{
(*PointId_Num)(nil),
(*PointId_Uuid)(nil),
}
- file_points_proto_msgTypes[4].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[8].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[4].OneofWrappers = []any{}
+ file_points_proto_msgTypes[8].OneofWrappers = []any{
(*VectorInput_Id)(nil),
(*VectorInput_Dense)(nil),
(*VectorInput_Sparse)(nil),
(*VectorInput_MultiDense)(nil),
}
- file_points_proto_msgTypes[10].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[11].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[12].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[13].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[15].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[16].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[17].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[18].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[19].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[20].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[23].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[10].OneofWrappers = []any{}
+ file_points_proto_msgTypes[11].OneofWrappers = []any{}
+ file_points_proto_msgTypes[12].OneofWrappers = []any{}
+ file_points_proto_msgTypes[13].OneofWrappers = []any{}
+ file_points_proto_msgTypes[15].OneofWrappers = []any{}
+ file_points_proto_msgTypes[16].OneofWrappers = []any{}
+ file_points_proto_msgTypes[17].OneofWrappers = []any{}
+ file_points_proto_msgTypes[18].OneofWrappers = []any{}
+ file_points_proto_msgTypes[19].OneofWrappers = []any{}
+ file_points_proto_msgTypes[20].OneofWrappers = []any{}
+ file_points_proto_msgTypes[23].OneofWrappers = []any{
(*WithPayloadSelector_Enable)(nil),
(*WithPayloadSelector_Include)(nil),
(*WithPayloadSelector_Exclude)(nil),
}
- file_points_proto_msgTypes[25].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[25].OneofWrappers = []any{
(*Vectors_Vector)(nil),
(*Vectors_Vectors)(nil),
}
- file_points_proto_msgTypes[27].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[27].OneofWrappers = []any{
(*WithVectorsSelector_Enable)(nil),
(*WithVectorsSelector_Include)(nil),
}
- file_points_proto_msgTypes[28].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[29].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[30].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[31].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[32].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[33].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[34].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[28].OneofWrappers = []any{}
+ file_points_proto_msgTypes[29].OneofWrappers = []any{}
+ file_points_proto_msgTypes[30].OneofWrappers = []any{}
+ file_points_proto_msgTypes[31].OneofWrappers = []any{}
+ file_points_proto_msgTypes[32].OneofWrappers = []any{}
+ file_points_proto_msgTypes[33].OneofWrappers = []any{}
+ file_points_proto_msgTypes[34].OneofWrappers = []any{
(*StartFrom_Float)(nil),
(*StartFrom_Integer)(nil),
(*StartFrom_Timestamp)(nil),
(*StartFrom_Datetime)(nil),
}
- file_points_proto_msgTypes[35].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[36].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[37].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[38].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[39].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[40].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[41].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[35].OneofWrappers = []any{}
+ file_points_proto_msgTypes[36].OneofWrappers = []any{}
+ file_points_proto_msgTypes[37].OneofWrappers = []any{}
+ file_points_proto_msgTypes[38].OneofWrappers = []any{}
+ file_points_proto_msgTypes[39].OneofWrappers = []any{}
+ file_points_proto_msgTypes[40].OneofWrappers = []any{}
+ file_points_proto_msgTypes[41].OneofWrappers = []any{
(*TargetVector_Single)(nil),
}
- file_points_proto_msgTypes[42].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[42].OneofWrappers = []any{
(*VectorExample_Id)(nil),
(*VectorExample_Vector)(nil),
}
- file_points_proto_msgTypes[44].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[45].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[46].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[47].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[51].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[44].OneofWrappers = []any{}
+ file_points_proto_msgTypes[45].OneofWrappers = []any{}
+ file_points_proto_msgTypes[46].OneofWrappers = []any{}
+ file_points_proto_msgTypes[47].OneofWrappers = []any{}
+ file_points_proto_msgTypes[51].OneofWrappers = []any{
(*Query_Nearest)(nil),
(*Query_Recommend)(nil),
(*Query_Discover)(nil),
@@ -12641,14 +12641,14 @@ func file_points_proto_init() {
(*Query_Fusion)(nil),
(*Query_Sample)(nil),
}
- file_points_proto_msgTypes[52].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[53].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[54].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[55].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[56].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[52].OneofWrappers = []any{}
+ file_points_proto_msgTypes[53].OneofWrappers = []any{}
+ file_points_proto_msgTypes[54].OneofWrappers = []any{}
+ file_points_proto_msgTypes[55].OneofWrappers = []any{}
+ file_points_proto_msgTypes[56].OneofWrappers = []any{
(*FacetValue_StringValue)(nil),
}
- file_points_proto_msgTypes[58].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[58].OneofWrappers = []any{
(*PointsUpdateOperation_Upsert)(nil),
(*PointsUpdateOperation_DeleteDeprecated)(nil),
(*PointsUpdateOperation_SetPayload_)(nil),
@@ -12660,22 +12660,22 @@ func file_points_proto_init() {
(*PointsUpdateOperation_DeletePoints_)(nil),
(*PointsUpdateOperation_ClearPayload_)(nil),
}
- file_points_proto_msgTypes[59].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[61].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[62].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[59].OneofWrappers = []any{}
+ file_points_proto_msgTypes[61].OneofWrappers = []any{}
+ file_points_proto_msgTypes[62].OneofWrappers = []any{
(*OrderValue_Int)(nil),
(*OrderValue_Float)(nil),
}
- file_points_proto_msgTypes[63].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[64].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[63].OneofWrappers = []any{}
+ file_points_proto_msgTypes[64].OneofWrappers = []any{
(*GroupId_UnsignedValue)(nil),
(*GroupId_IntegerValue)(nil),
(*GroupId_StringValue)(nil),
}
- file_points_proto_msgTypes[75].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[77].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[85].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[87].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[75].OneofWrappers = []any{}
+ file_points_proto_msgTypes[77].OneofWrappers = []any{}
+ file_points_proto_msgTypes[85].OneofWrappers = []any{}
+ file_points_proto_msgTypes[87].OneofWrappers = []any{
(*Condition_Field)(nil),
(*Condition_IsEmpty)(nil),
(*Condition_HasId)(nil),
@@ -12683,7 +12683,7 @@ func file_points_proto_init() {
(*Condition_IsNull)(nil),
(*Condition_Nested)(nil),
}
- file_points_proto_msgTypes[93].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[93].OneofWrappers = []any{
(*Match_Keyword)(nil),
(*Match_Integer)(nil),
(*Match_Boolean)(nil),
@@ -12693,22 +12693,22 @@ func file_points_proto_init() {
(*Match_ExceptIntegers)(nil),
(*Match_ExceptKeywords)(nil),
}
- file_points_proto_msgTypes[96].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[97].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[102].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[103].OneofWrappers = []interface{}{
+ file_points_proto_msgTypes[96].OneofWrappers = []any{}
+ file_points_proto_msgTypes[97].OneofWrappers = []any{}
+ file_points_proto_msgTypes[102].OneofWrappers = []any{}
+ file_points_proto_msgTypes[103].OneofWrappers = []any{
(*PointsSelector_Points)(nil),
(*PointsSelector_Filter)(nil),
}
- file_points_proto_msgTypes[105].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[109].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[110].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[111].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[112].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[113].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[114].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[115].OneofWrappers = []interface{}{}
- file_points_proto_msgTypes[116].OneofWrappers = []interface{}{}
+ file_points_proto_msgTypes[105].OneofWrappers = []any{}
+ file_points_proto_msgTypes[109].OneofWrappers = []any{}
+ file_points_proto_msgTypes[110].OneofWrappers = []any{}
+ file_points_proto_msgTypes[111].OneofWrappers = []any{}
+ file_points_proto_msgTypes[112].OneofWrappers = []any{}
+ file_points_proto_msgTypes[113].OneofWrappers = []any{}
+ file_points_proto_msgTypes[114].OneofWrappers = []any{}
+ file_points_proto_msgTypes[115].OneofWrappers = []any{}
+ file_points_proto_msgTypes[116].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/qdrant/points_service.pb.go b/qdrant/points_service.pb.go
index 77523ed..13a3484 100644
--- a/qdrant/points_service.pb.go
+++ b/qdrant/points_service.pb.go
@@ -1,10 +1,10 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v4.22.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.1
// source: points_service.proto
-package go_client
+package qdrant
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -141,7 +141,7 @@ var file_points_service_proto_rawDesc = []byte{
0x6e, 0x73, 0x65, 0x22, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
-var file_points_service_proto_goTypes = []interface{}{
+var file_points_service_proto_goTypes = []any{
(*UpsertPoints)(nil), // 0: qdrant.UpsertPoints
(*DeletePoints)(nil), // 1: qdrant.DeletePoints
(*GetPoints)(nil), // 2: qdrant.GetPoints
diff --git a/qdrant/points_service_grpc.pb.go b/qdrant/points_service_grpc.pb.go
index 4194bb4..f7a50a3 100644
--- a/qdrant/points_service_grpc.pb.go
+++ b/qdrant/points_service_grpc.pb.go
@@ -1,22 +1,48 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v4.22.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v4.25.1
// source: points_service.proto
-package go_client
+package qdrant
import (
context "context"
grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ Points_Upsert_FullMethodName = "/qdrant.Points/Upsert"
+ Points_Delete_FullMethodName = "/qdrant.Points/Delete"
+ Points_Get_FullMethodName = "/qdrant.Points/Get"
+ Points_UpdateVectors_FullMethodName = "/qdrant.Points/UpdateVectors"
+ Points_DeleteVectors_FullMethodName = "/qdrant.Points/DeleteVectors"
+ Points_SetPayload_FullMethodName = "/qdrant.Points/SetPayload"
+ Points_OverwritePayload_FullMethodName = "/qdrant.Points/OverwritePayload"
+ Points_DeletePayload_FullMethodName = "/qdrant.Points/DeletePayload"
+ Points_ClearPayload_FullMethodName = "/qdrant.Points/ClearPayload"
+ Points_CreateFieldIndex_FullMethodName = "/qdrant.Points/CreateFieldIndex"
+ Points_DeleteFieldIndex_FullMethodName = "/qdrant.Points/DeleteFieldIndex"
+ Points_Search_FullMethodName = "/qdrant.Points/Search"
+ Points_SearchBatch_FullMethodName = "/qdrant.Points/SearchBatch"
+ Points_SearchGroups_FullMethodName = "/qdrant.Points/SearchGroups"
+ Points_Scroll_FullMethodName = "/qdrant.Points/Scroll"
+ Points_Recommend_FullMethodName = "/qdrant.Points/Recommend"
+ Points_RecommendBatch_FullMethodName = "/qdrant.Points/RecommendBatch"
+ Points_RecommendGroups_FullMethodName = "/qdrant.Points/RecommendGroups"
+ Points_Discover_FullMethodName = "/qdrant.Points/Discover"
+ Points_DiscoverBatch_FullMethodName = "/qdrant.Points/DiscoverBatch"
+ Points_Count_FullMethodName = "/qdrant.Points/Count"
+ Points_UpdateBatch_FullMethodName = "/qdrant.Points/UpdateBatch"
+ Points_Query_FullMethodName = "/qdrant.Points/Query"
+ Points_QueryBatch_FullMethodName = "/qdrant.Points/QueryBatch"
+ Points_QueryGroups_FullMethodName = "/qdrant.Points/QueryGroups"
+)
// PointsClient is the client API for Points service.
//
@@ -97,8 +123,9 @@ func NewPointsClient(cc grpc.ClientConnInterface) PointsClient {
}
func (c *pointsClient) Upsert(ctx context.Context, in *UpsertPoints, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/Upsert", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_Upsert_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -106,8 +133,9 @@ func (c *pointsClient) Upsert(ctx context.Context, in *UpsertPoints, opts ...grp
}
func (c *pointsClient) Delete(ctx context.Context, in *DeletePoints, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/Delete", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_Delete_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -115,8 +143,9 @@ func (c *pointsClient) Delete(ctx context.Context, in *DeletePoints, opts ...grp
}
func (c *pointsClient) Get(ctx context.Context, in *GetPoints, opts ...grpc.CallOption) (*GetResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(GetResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/Get", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_Get_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -124,8 +153,9 @@ func (c *pointsClient) Get(ctx context.Context, in *GetPoints, opts ...grpc.Call
}
func (c *pointsClient) UpdateVectors(ctx context.Context, in *UpdatePointVectors, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/UpdateVectors", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_UpdateVectors_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -133,8 +163,9 @@ func (c *pointsClient) UpdateVectors(ctx context.Context, in *UpdatePointVectors
}
func (c *pointsClient) DeleteVectors(ctx context.Context, in *DeletePointVectors, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/DeleteVectors", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_DeleteVectors_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -142,8 +173,9 @@ func (c *pointsClient) DeleteVectors(ctx context.Context, in *DeletePointVectors
}
func (c *pointsClient) SetPayload(ctx context.Context, in *SetPayloadPoints, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/SetPayload", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_SetPayload_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -151,8 +183,9 @@ func (c *pointsClient) SetPayload(ctx context.Context, in *SetPayloadPoints, opt
}
func (c *pointsClient) OverwritePayload(ctx context.Context, in *SetPayloadPoints, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/OverwritePayload", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_OverwritePayload_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -160,8 +193,9 @@ func (c *pointsClient) OverwritePayload(ctx context.Context, in *SetPayloadPoint
}
func (c *pointsClient) DeletePayload(ctx context.Context, in *DeletePayloadPoints, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/DeletePayload", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_DeletePayload_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -169,8 +203,9 @@ func (c *pointsClient) DeletePayload(ctx context.Context, in *DeletePayloadPoint
}
func (c *pointsClient) ClearPayload(ctx context.Context, in *ClearPayloadPoints, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/ClearPayload", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_ClearPayload_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -178,8 +213,9 @@ func (c *pointsClient) ClearPayload(ctx context.Context, in *ClearPayloadPoints,
}
func (c *pointsClient) CreateFieldIndex(ctx context.Context, in *CreateFieldIndexCollection, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/CreateFieldIndex", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_CreateFieldIndex_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -187,8 +223,9 @@ func (c *pointsClient) CreateFieldIndex(ctx context.Context, in *CreateFieldInde
}
func (c *pointsClient) DeleteFieldIndex(ctx context.Context, in *DeleteFieldIndexCollection, opts ...grpc.CallOption) (*PointsOperationResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(PointsOperationResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/DeleteFieldIndex", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_DeleteFieldIndex_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -196,8 +233,9 @@ func (c *pointsClient) DeleteFieldIndex(ctx context.Context, in *DeleteFieldInde
}
func (c *pointsClient) Search(ctx context.Context, in *SearchPoints, opts ...grpc.CallOption) (*SearchResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SearchResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/Search", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_Search_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -205,8 +243,9 @@ func (c *pointsClient) Search(ctx context.Context, in *SearchPoints, opts ...grp
}
func (c *pointsClient) SearchBatch(ctx context.Context, in *SearchBatchPoints, opts ...grpc.CallOption) (*SearchBatchResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SearchBatchResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/SearchBatch", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_SearchBatch_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -214,8 +253,9 @@ func (c *pointsClient) SearchBatch(ctx context.Context, in *SearchBatchPoints, o
}
func (c *pointsClient) SearchGroups(ctx context.Context, in *SearchPointGroups, opts ...grpc.CallOption) (*SearchGroupsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(SearchGroupsResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/SearchGroups", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_SearchGroups_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -223,8 +263,9 @@ func (c *pointsClient) SearchGroups(ctx context.Context, in *SearchPointGroups,
}
func (c *pointsClient) Scroll(ctx context.Context, in *ScrollPoints, opts ...grpc.CallOption) (*ScrollResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ScrollResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/Scroll", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_Scroll_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -232,8 +273,9 @@ func (c *pointsClient) Scroll(ctx context.Context, in *ScrollPoints, opts ...grp
}
func (c *pointsClient) Recommend(ctx context.Context, in *RecommendPoints, opts ...grpc.CallOption) (*RecommendResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RecommendResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/Recommend", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_Recommend_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -241,8 +283,9 @@ func (c *pointsClient) Recommend(ctx context.Context, in *RecommendPoints, opts
}
func (c *pointsClient) RecommendBatch(ctx context.Context, in *RecommendBatchPoints, opts ...grpc.CallOption) (*RecommendBatchResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RecommendBatchResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/RecommendBatch", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_RecommendBatch_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -250,8 +293,9 @@ func (c *pointsClient) RecommendBatch(ctx context.Context, in *RecommendBatchPoi
}
func (c *pointsClient) RecommendGroups(ctx context.Context, in *RecommendPointGroups, opts ...grpc.CallOption) (*RecommendGroupsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(RecommendGroupsResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/RecommendGroups", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_RecommendGroups_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -259,8 +303,9 @@ func (c *pointsClient) RecommendGroups(ctx context.Context, in *RecommendPointGr
}
func (c *pointsClient) Discover(ctx context.Context, in *DiscoverPoints, opts ...grpc.CallOption) (*DiscoverResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DiscoverResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/Discover", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_Discover_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -268,8 +313,9 @@ func (c *pointsClient) Discover(ctx context.Context, in *DiscoverPoints, opts ..
}
func (c *pointsClient) DiscoverBatch(ctx context.Context, in *DiscoverBatchPoints, opts ...grpc.CallOption) (*DiscoverBatchResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DiscoverBatchResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/DiscoverBatch", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_DiscoverBatch_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -277,8 +323,9 @@ func (c *pointsClient) DiscoverBatch(ctx context.Context, in *DiscoverBatchPoint
}
func (c *pointsClient) Count(ctx context.Context, in *CountPoints, opts ...grpc.CallOption) (*CountResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CountResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/Count", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_Count_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -286,8 +333,9 @@ func (c *pointsClient) Count(ctx context.Context, in *CountPoints, opts ...grpc.
}
func (c *pointsClient) UpdateBatch(ctx context.Context, in *UpdateBatchPoints, opts ...grpc.CallOption) (*UpdateBatchResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(UpdateBatchResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/UpdateBatch", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_UpdateBatch_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -295,8 +343,9 @@ func (c *pointsClient) UpdateBatch(ctx context.Context, in *UpdateBatchPoints, o
}
func (c *pointsClient) Query(ctx context.Context, in *QueryPoints, opts ...grpc.CallOption) (*QueryResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QueryResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/Query", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_Query_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -304,8 +353,9 @@ func (c *pointsClient) Query(ctx context.Context, in *QueryPoints, opts ...grpc.
}
func (c *pointsClient) QueryBatch(ctx context.Context, in *QueryBatchPoints, opts ...grpc.CallOption) (*QueryBatchResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QueryBatchResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/QueryBatch", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_QueryBatch_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -313,735 +363,11 @@ func (c *pointsClient) QueryBatch(ctx context.Context, in *QueryBatchPoints, opt
}
func (c *pointsClient) QueryGroups(ctx context.Context, in *QueryPointGroups, opts ...grpc.CallOption) (*QueryGroupsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(QueryGroupsResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Points/QueryGroups", in, out, opts...)
+ err := c.cc.Invoke(ctx, Points_QueryGroups_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
-
-// PointsServer is the server API for Points service.
-// All implementations must embed UnimplementedPointsServer
-// for forward compatibility
-type PointsServer interface {
- // Perform insert + updates on points. If a point with a given ID already exists - it will be overwritten.
- Upsert(context.Context, *UpsertPoints) (*PointsOperationResponse, error)
- // Delete points
- Delete(context.Context, *DeletePoints) (*PointsOperationResponse, error)
- // Retrieve points
- Get(context.Context, *GetPoints) (*GetResponse, error)
- // Update named vectors for point
- UpdateVectors(context.Context, *UpdatePointVectors) (*PointsOperationResponse, error)
- // Delete named vectors for points
- DeleteVectors(context.Context, *DeletePointVectors) (*PointsOperationResponse, error)
- // Set payload for points
- SetPayload(context.Context, *SetPayloadPoints) (*PointsOperationResponse, error)
- // Overwrite payload for points
- OverwritePayload(context.Context, *SetPayloadPoints) (*PointsOperationResponse, error)
- // Delete specified key payload for points
- DeletePayload(context.Context, *DeletePayloadPoints) (*PointsOperationResponse, error)
- // Remove all payload for specified points
- ClearPayload(context.Context, *ClearPayloadPoints) (*PointsOperationResponse, error)
- // Create index for field in collection
- CreateFieldIndex(context.Context, *CreateFieldIndexCollection) (*PointsOperationResponse, error)
- // Delete field index for collection
- DeleteFieldIndex(context.Context, *DeleteFieldIndexCollection) (*PointsOperationResponse, error)
- // Retrieve closest points based on vector similarity and given filtering conditions
- Search(context.Context, *SearchPoints) (*SearchResponse, error)
- // Retrieve closest points based on vector similarity and given filtering conditions
- SearchBatch(context.Context, *SearchBatchPoints) (*SearchBatchResponse, error)
- // Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given field
- SearchGroups(context.Context, *SearchPointGroups) (*SearchGroupsResponse, error)
- // Iterate over all or filtered points
- Scroll(context.Context, *ScrollPoints) (*ScrollResponse, error)
- // Look for the points which are closer to stored positive examples and at the same time further to negative examples.
- Recommend(context.Context, *RecommendPoints) (*RecommendResponse, error)
- // Look for the points which are closer to stored positive examples and at the same time further to negative examples.
- RecommendBatch(context.Context, *RecommendBatchPoints) (*RecommendBatchResponse, error)
- // Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given field
- RecommendGroups(context.Context, *RecommendPointGroups) (*RecommendGroupsResponse, error)
- // Use context and a target to find the most similar points to the target, constrained by the context.
- //
- // When using only the context (without a target), a special search - called context search - is performed where
- // pairs of points are used to generate a loss that guides the search towards the zone where
- // most positive examples overlap. This means that the score minimizes the scenario of
- // finding a point closer to a negative than to a positive part of a pair.
- //
- // Since the score of a context relates to loss, the maximum score a point can get is 0.0,
- // and it becomes normal that many points can have a score of 0.0.
- //
- // When using target (with or without context), the score behaves a little different: The
- // integer part of the score represents the rank with respect to the context, while the
- // decimal part of the score relates to the distance to the target. The context part of the score for
- // each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair,
- // and -1 otherwise.
- Discover(context.Context, *DiscoverPoints) (*DiscoverResponse, error)
- // Batch request points based on { positive, negative } pairs of examples, and/or a target
- DiscoverBatch(context.Context, *DiscoverBatchPoints) (*DiscoverBatchResponse, error)
- // Count points in collection with given filtering conditions
- Count(context.Context, *CountPoints) (*CountResponse, error)
- // Perform multiple update operations in one request
- UpdateBatch(context.Context, *UpdateBatchPoints) (*UpdateBatchResponse, error)
- // Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
- Query(context.Context, *QueryPoints) (*QueryResponse, error)
- // Universally query points in a batch fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
- QueryBatch(context.Context, *QueryBatchPoints) (*QueryBatchResponse, error)
- // Universally query points in a group fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
- QueryGroups(context.Context, *QueryPointGroups) (*QueryGroupsResponse, error)
- mustEmbedUnimplementedPointsServer()
-}
-
-// UnimplementedPointsServer must be embedded to have forward compatible implementations.
-type UnimplementedPointsServer struct {
-}
-
-func (UnimplementedPointsServer) Upsert(context.Context, *UpsertPoints) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Upsert not implemented")
-}
-func (UnimplementedPointsServer) Delete(context.Context, *DeletePoints) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
-}
-func (UnimplementedPointsServer) Get(context.Context, *GetPoints) (*GetResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Get not implemented")
-}
-func (UnimplementedPointsServer) UpdateVectors(context.Context, *UpdatePointVectors) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateVectors not implemented")
-}
-func (UnimplementedPointsServer) DeleteVectors(context.Context, *DeletePointVectors) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteVectors not implemented")
-}
-func (UnimplementedPointsServer) SetPayload(context.Context, *SetPayloadPoints) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SetPayload not implemented")
-}
-func (UnimplementedPointsServer) OverwritePayload(context.Context, *SetPayloadPoints) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method OverwritePayload not implemented")
-}
-func (UnimplementedPointsServer) DeletePayload(context.Context, *DeletePayloadPoints) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeletePayload not implemented")
-}
-func (UnimplementedPointsServer) ClearPayload(context.Context, *ClearPayloadPoints) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ClearPayload not implemented")
-}
-func (UnimplementedPointsServer) CreateFieldIndex(context.Context, *CreateFieldIndexCollection) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateFieldIndex not implemented")
-}
-func (UnimplementedPointsServer) DeleteFieldIndex(context.Context, *DeleteFieldIndexCollection) (*PointsOperationResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteFieldIndex not implemented")
-}
-func (UnimplementedPointsServer) Search(context.Context, *SearchPoints) (*SearchResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Search not implemented")
-}
-func (UnimplementedPointsServer) SearchBatch(context.Context, *SearchBatchPoints) (*SearchBatchResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SearchBatch not implemented")
-}
-func (UnimplementedPointsServer) SearchGroups(context.Context, *SearchPointGroups) (*SearchGroupsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method SearchGroups not implemented")
-}
-func (UnimplementedPointsServer) Scroll(context.Context, *ScrollPoints) (*ScrollResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Scroll not implemented")
-}
-func (UnimplementedPointsServer) Recommend(context.Context, *RecommendPoints) (*RecommendResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Recommend not implemented")
-}
-func (UnimplementedPointsServer) RecommendBatch(context.Context, *RecommendBatchPoints) (*RecommendBatchResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RecommendBatch not implemented")
-}
-func (UnimplementedPointsServer) RecommendGroups(context.Context, *RecommendPointGroups) (*RecommendGroupsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method RecommendGroups not implemented")
-}
-func (UnimplementedPointsServer) Discover(context.Context, *DiscoverPoints) (*DiscoverResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Discover not implemented")
-}
-func (UnimplementedPointsServer) DiscoverBatch(context.Context, *DiscoverBatchPoints) (*DiscoverBatchResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DiscoverBatch not implemented")
-}
-func (UnimplementedPointsServer) Count(context.Context, *CountPoints) (*CountResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Count not implemented")
-}
-func (UnimplementedPointsServer) UpdateBatch(context.Context, *UpdateBatchPoints) (*UpdateBatchResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method UpdateBatch not implemented")
-}
-func (UnimplementedPointsServer) Query(context.Context, *QueryPoints) (*QueryResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Query not implemented")
-}
-func (UnimplementedPointsServer) QueryBatch(context.Context, *QueryBatchPoints) (*QueryBatchResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method QueryBatch not implemented")
-}
-func (UnimplementedPointsServer) QueryGroups(context.Context, *QueryPointGroups) (*QueryGroupsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method QueryGroups not implemented")
-}
-func (UnimplementedPointsServer) mustEmbedUnimplementedPointsServer() {}
-
-// UnsafePointsServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to PointsServer will
-// result in compilation errors.
-type UnsafePointsServer interface {
- mustEmbedUnimplementedPointsServer()
-}
-
-func RegisterPointsServer(s grpc.ServiceRegistrar, srv PointsServer) {
- s.RegisterService(&Points_ServiceDesc, srv)
-}
-
-func _Points_Upsert_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpsertPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).Upsert(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/Upsert",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).Upsert(ctx, req.(*UpsertPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeletePoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).Delete(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/Delete",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).Delete(ctx, req.(*DeletePoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(GetPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).Get(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/Get",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).Get(ctx, req.(*GetPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_UpdateVectors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdatePointVectors)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).UpdateVectors(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/UpdateVectors",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).UpdateVectors(ctx, req.(*UpdatePointVectors))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_DeleteVectors_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeletePointVectors)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).DeleteVectors(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/DeleteVectors",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).DeleteVectors(ctx, req.(*DeletePointVectors))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_SetPayload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SetPayloadPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).SetPayload(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/SetPayload",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).SetPayload(ctx, req.(*SetPayloadPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_OverwritePayload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SetPayloadPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).OverwritePayload(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/OverwritePayload",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).OverwritePayload(ctx, req.(*SetPayloadPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_DeletePayload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeletePayloadPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).DeletePayload(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/DeletePayload",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).DeletePayload(ctx, req.(*DeletePayloadPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_ClearPayload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ClearPayloadPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).ClearPayload(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/ClearPayload",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).ClearPayload(ctx, req.(*ClearPayloadPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_CreateFieldIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateFieldIndexCollection)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).CreateFieldIndex(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/CreateFieldIndex",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).CreateFieldIndex(ctx, req.(*CreateFieldIndexCollection))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_DeleteFieldIndex_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteFieldIndexCollection)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).DeleteFieldIndex(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/DeleteFieldIndex",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).DeleteFieldIndex(ctx, req.(*DeleteFieldIndexCollection))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_Search_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SearchPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).Search(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/Search",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).Search(ctx, req.(*SearchPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_SearchBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SearchBatchPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).SearchBatch(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/SearchBatch",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).SearchBatch(ctx, req.(*SearchBatchPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_SearchGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(SearchPointGroups)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).SearchGroups(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/SearchGroups",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).SearchGroups(ctx, req.(*SearchPointGroups))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_Scroll_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ScrollPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).Scroll(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/Scroll",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).Scroll(ctx, req.(*ScrollPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_Recommend_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RecommendPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).Recommend(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/Recommend",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).Recommend(ctx, req.(*RecommendPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_RecommendBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RecommendBatchPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).RecommendBatch(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/RecommendBatch",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).RecommendBatch(ctx, req.(*RecommendBatchPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_RecommendGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(RecommendPointGroups)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).RecommendGroups(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/RecommendGroups",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).RecommendGroups(ctx, req.(*RecommendPointGroups))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_Discover_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DiscoverPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).Discover(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/Discover",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).Discover(ctx, req.(*DiscoverPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_DiscoverBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DiscoverBatchPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).DiscoverBatch(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/DiscoverBatch",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).DiscoverBatch(ctx, req.(*DiscoverBatchPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_Count_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CountPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).Count(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/Count",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).Count(ctx, req.(*CountPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_UpdateBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(UpdateBatchPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).UpdateBatch(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/UpdateBatch",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).UpdateBatch(ctx, req.(*UpdateBatchPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_Query_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(QueryPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).Query(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/Query",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).Query(ctx, req.(*QueryPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_QueryBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(QueryBatchPoints)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).QueryBatch(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/QueryBatch",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).QueryBatch(ctx, req.(*QueryBatchPoints))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Points_QueryGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(QueryPointGroups)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(PointsServer).QueryGroups(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Points/QueryGroups",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(PointsServer).QueryGroups(ctx, req.(*QueryPointGroups))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// Points_ServiceDesc is the grpc.ServiceDesc for Points service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var Points_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "qdrant.Points",
- HandlerType: (*PointsServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Upsert",
- Handler: _Points_Upsert_Handler,
- },
- {
- MethodName: "Delete",
- Handler: _Points_Delete_Handler,
- },
- {
- MethodName: "Get",
- Handler: _Points_Get_Handler,
- },
- {
- MethodName: "UpdateVectors",
- Handler: _Points_UpdateVectors_Handler,
- },
- {
- MethodName: "DeleteVectors",
- Handler: _Points_DeleteVectors_Handler,
- },
- {
- MethodName: "SetPayload",
- Handler: _Points_SetPayload_Handler,
- },
- {
- MethodName: "OverwritePayload",
- Handler: _Points_OverwritePayload_Handler,
- },
- {
- MethodName: "DeletePayload",
- Handler: _Points_DeletePayload_Handler,
- },
- {
- MethodName: "ClearPayload",
- Handler: _Points_ClearPayload_Handler,
- },
- {
- MethodName: "CreateFieldIndex",
- Handler: _Points_CreateFieldIndex_Handler,
- },
- {
- MethodName: "DeleteFieldIndex",
- Handler: _Points_DeleteFieldIndex_Handler,
- },
- {
- MethodName: "Search",
- Handler: _Points_Search_Handler,
- },
- {
- MethodName: "SearchBatch",
- Handler: _Points_SearchBatch_Handler,
- },
- {
- MethodName: "SearchGroups",
- Handler: _Points_SearchGroups_Handler,
- },
- {
- MethodName: "Scroll",
- Handler: _Points_Scroll_Handler,
- },
- {
- MethodName: "Recommend",
- Handler: _Points_Recommend_Handler,
- },
- {
- MethodName: "RecommendBatch",
- Handler: _Points_RecommendBatch_Handler,
- },
- {
- MethodName: "RecommendGroups",
- Handler: _Points_RecommendGroups_Handler,
- },
- {
- MethodName: "Discover",
- Handler: _Points_Discover_Handler,
- },
- {
- MethodName: "DiscoverBatch",
- Handler: _Points_DiscoverBatch_Handler,
- },
- {
- MethodName: "Count",
- Handler: _Points_Count_Handler,
- },
- {
- MethodName: "UpdateBatch",
- Handler: _Points_UpdateBatch_Handler,
- },
- {
- MethodName: "Query",
- Handler: _Points_Query_Handler,
- },
- {
- MethodName: "QueryBatch",
- Handler: _Points_QueryBatch_Handler,
- },
- {
- MethodName: "QueryGroups",
- Handler: _Points_QueryGroups_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "points_service.proto",
-}
diff --git a/qdrant/qdrant.go b/qdrant/qdrant.go
new file mode 100644
index 0000000..8bdb2e8
--- /dev/null
+++ b/qdrant/qdrant.go
@@ -0,0 +1,14 @@
+package qdrant
+
+import (
+ "context"
+)
+
+// Checks the liveness (health) of the service.
+func (c *Client) HealthCheck(ctx context.Context) (*HealthCheckReply, error) {
+ resp, err := c.GetQdrantClient().HealthCheck(ctx, &HealthCheckRequest{})
+ if err != nil {
+ return nil, newQdrantErr(err, "HealthCheck")
+ }
+ return resp, nil
+}
diff --git a/qdrant/qdrant.pb.go b/qdrant/qdrant.pb.go
index d3a9775..f9f04c7 100644
--- a/qdrant/qdrant.pb.go
+++ b/qdrant/qdrant.pb.go
@@ -1,10 +1,10 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v4.22.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.1
// source: qdrant.proto
-package go_client
+package qdrant
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -159,7 +159,7 @@ func file_qdrant_proto_rawDescGZIP() []byte {
}
var file_qdrant_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
-var file_qdrant_proto_goTypes = []interface{}{
+var file_qdrant_proto_goTypes = []any{
(*HealthCheckRequest)(nil), // 0: qdrant.HealthCheckRequest
(*HealthCheckReply)(nil), // 1: qdrant.HealthCheckReply
}
@@ -182,7 +182,7 @@ func file_qdrant_proto_init() {
file_points_service_proto_init()
file_snapshots_service_proto_init()
if !protoimpl.UnsafeEnabled {
- file_qdrant_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_qdrant_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckRequest); i {
case 0:
return &v.state
@@ -194,7 +194,7 @@ func file_qdrant_proto_init() {
return nil
}
}
- file_qdrant_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_qdrant_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*HealthCheckReply); i {
case 0:
return &v.state
@@ -207,7 +207,7 @@ func file_qdrant_proto_init() {
}
}
}
- file_qdrant_proto_msgTypes[1].OneofWrappers = []interface{}{}
+ file_qdrant_proto_msgTypes[1].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/qdrant/qdrant_grpc.pb.go b/qdrant/qdrant_grpc.pb.go
index 5fa4769..b86f154 100644
--- a/qdrant/qdrant_grpc.pb.go
+++ b/qdrant/qdrant_grpc.pb.go
@@ -1,10 +1,10 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v4.22.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v4.25.1
// source: qdrant.proto
-package go_client
+package qdrant
import (
context "context"
@@ -15,8 +15,12 @@ import (
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ Qdrant_HealthCheck_FullMethodName = "/qdrant.Qdrant/HealthCheck"
+)
// QdrantClient is the client API for Qdrant service.
//
@@ -34,8 +38,9 @@ func NewQdrantClient(cc grpc.ClientConnInterface) QdrantClient {
}
func (c *qdrantClient) HealthCheck(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckReply, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(HealthCheckReply)
- err := c.cc.Invoke(ctx, "/qdrant.Qdrant/HealthCheck", in, out, opts...)
+ err := c.cc.Invoke(ctx, Qdrant_HealthCheck_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -44,20 +49,24 @@ func (c *qdrantClient) HealthCheck(ctx context.Context, in *HealthCheckRequest,
// QdrantServer is the server API for Qdrant service.
// All implementations must embed UnimplementedQdrantServer
-// for forward compatibility
+// for forward compatibility.
type QdrantServer interface {
HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckReply, error)
mustEmbedUnimplementedQdrantServer()
}
-// UnimplementedQdrantServer must be embedded to have forward compatible implementations.
-type UnimplementedQdrantServer struct {
-}
+// UnimplementedQdrantServer must be embedded to have
+// forward compatible implementations.
+//
+// NOTE: this should be embedded by value instead of pointer to avoid a nil
+// pointer dereference when methods are called.
+type UnimplementedQdrantServer struct{}
func (UnimplementedQdrantServer) HealthCheck(context.Context, *HealthCheckRequest) (*HealthCheckReply, error) {
return nil, status.Errorf(codes.Unimplemented, "method HealthCheck not implemented")
}
func (UnimplementedQdrantServer) mustEmbedUnimplementedQdrantServer() {}
+func (UnimplementedQdrantServer) testEmbeddedByValue() {}
// UnsafeQdrantServer may be embedded to opt out of forward compatibility for this service.
// Use of this interface is not recommended, as added methods to QdrantServer will
@@ -67,6 +76,13 @@ type UnsafeQdrantServer interface {
}
func RegisterQdrantServer(s grpc.ServiceRegistrar, srv QdrantServer) {
+	// If the following call panics, it indicates UnimplementedQdrantServer was
+ // embedded by pointer and is nil. This will cause panics if an
+ // unimplemented method is ever invoked, so we test this at initialization
+ // time to prevent it from happening at runtime later due to I/O.
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok {
+ t.testEmbeddedByValue()
+ }
s.RegisterService(&Qdrant_ServiceDesc, srv)
}
@@ -80,7 +96,7 @@ func _Qdrant_HealthCheck_Handler(srv interface{}, ctx context.Context, dec func(
}
info := &grpc.UnaryServerInfo{
Server: srv,
- FullMethod: "/qdrant.Qdrant/HealthCheck",
+ FullMethod: Qdrant_HealthCheck_FullMethodName,
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(QdrantServer).HealthCheck(ctx, req.(*HealthCheckRequest))
diff --git a/qdrant/qdrant_grpc.pb_test.go b/qdrant/qdrant_grpc.pb_test.go
deleted file mode 100644
index 5c2a945..0000000
--- a/qdrant/qdrant_grpc.pb_test.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package go_client_test
-
-import (
- context "context"
- "log"
- "testing"
- "time"
-
- pb "github.com/qdrant/go-client/qdrant"
- "github.com/testcontainers/testcontainers-go"
- "github.com/testcontainers/testcontainers-go/modules/qdrant"
-
- grpc "google.golang.org/grpc"
- "google.golang.org/grpc/credentials/insecure"
-)
-
-func TestNewQdrantClient(t *testing.T) {
- var (
- collectionName = "test_collection"
- vectorSize uint64 = 4
- distance = pb.Distance_Dot
- )
-
- c, err := qdrant.RunContainer(context.Background(), testcontainers.WithImage("qdrant/qdrant:v1.9.0"))
- if err != nil {
- t.Fatalf("Could not start qdrant container: %v", err)
- }
-
- addr, err := c.GRPCEndpoint(context.Background())
- if err != nil {
- t.Fatalf("Could not get qdrant container grpc endpoint: %v", err)
- }
-
- // Set up a connection to the server.
- conn, err := grpc.DialContext(context.Background(), addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
- if err != nil {
- t.Fatalf("Failed to connect: %v", err)
- }
- defer conn.Close()
-
- // create grpc collection client
- collections_client := pb.NewCollectionsClient(conn)
-
- // Contact the server and print out its response.
- ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
- defer cancel()
-
- qdrantClient := pb.NewQdrantClient(conn)
-
- t.Run("Check Qdrant version", func(t *testing.T) {
- healthCheckResult, err := qdrantClient.HealthCheck(ctx, &pb.HealthCheckRequest{})
- if err != nil {
- t.Fatalf("Could not get health: %v", err)
- } else {
- log.Printf("Qdrant version: %s", healthCheckResult.GetVersion())
- }
- })
-
- t.Run("Delete collection", func(t *testing.T) {
- _, err = collections_client.Delete(ctx, &pb.DeleteCollection{
- CollectionName: collectionName,
- })
- if err != nil {
- t.Fatalf("Could not delete collection: %v", err)
- } else {
- log.Println("Collection", collectionName, "deleted")
- }
- })
-
- t.Run("Create new collection", func(t *testing.T) {
- var defaultSegmentNumber uint64 = 2
- _, err = collections_client.Create(ctx, &pb.CreateCollection{
- CollectionName: collectionName,
- VectorsConfig: &pb.VectorsConfig{Config: &pb.VectorsConfig_Params{
- Params: &pb.VectorParams{
- Size: vectorSize,
- Distance: distance,
- },
- }},
- OptimizersConfig: &pb.OptimizersConfigDiff{
- DefaultSegmentNumber: &defaultSegmentNumber,
- },
- })
- if err != nil {
- t.Fatalf("Could not create collection: %v", err)
- } else {
- log.Println("Collection", collectionName, "created")
- }
- })
-
- t.Run("List all created collections", func(t *testing.T) {
- r, err := collections_client.List(ctx, &pb.ListCollectionsRequest{})
- if err != nil {
- t.Fatalf("Could not get collections: %v", err)
- } else {
- log.Printf("List of collections: %s", r.GetCollections())
- }
- })
-
- // Create points grpc client
- pointsClient := pb.NewPointsClient(conn)
-
- t.Run("Create keyword field index", func(t *testing.T) {
- fieldIndex1Type := pb.FieldType_FieldTypeKeyword
- fieldIndex1Name := "city"
- _, err = pointsClient.CreateFieldIndex(ctx, &pb.CreateFieldIndexCollection{
- CollectionName: collectionName,
- FieldName: fieldIndex1Name,
- FieldType: &fieldIndex1Type,
- })
- if err != nil {
- t.Fatalf("Could not create field index: %v", err)
- } else {
- log.Println("Field index for field", fieldIndex1Name, "created")
- }
- })
-
- t.Run("Create integer field index", func(t *testing.T) {
- fieldIndex2Type := pb.FieldType_FieldTypeInteger
- fieldIndex2Name := "count"
- _, err = pointsClient.CreateFieldIndex(ctx, &pb.CreateFieldIndexCollection{
- CollectionName: collectionName,
- FieldName: fieldIndex2Name,
- FieldType: &fieldIndex2Type,
- })
- if err != nil {
- t.Fatalf("Could not create field index: %v", err)
- } else {
- log.Println("Field index for field", fieldIndex2Name, "created")
- }
- })
-
- t.Run("Upsert points", func(t *testing.T) {
- waitUpsert := true
- upsertPoints := []*pb.PointStruct{
- {
- // Point Id is number or UUID
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 1},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.05, 0.61, 0.76, 0.74}}}},
- Payload: map[string]*pb.Value{
- "city": {
- Kind: &pb.Value_StringValue{StringValue: "Berlin"},
- },
- "country": {
- Kind: &pb.Value_StringValue{StringValue: "Germany"},
- },
- "count": {
- Kind: &pb.Value_IntegerValue{IntegerValue: 1000000},
- },
- "square": {
- Kind: &pb.Value_DoubleValue{DoubleValue: 12.5},
- },
- },
- },
- {
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 2},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.19, 0.81, 0.75, 0.11}}}},
- Payload: map[string]*pb.Value{
- "city": {
- Kind: &pb.Value_ListValue{
- ListValue: &pb.ListValue{
- Values: []*pb.Value{
- {
- Kind: &pb.Value_StringValue{StringValue: "Berlin"},
- },
- {
- Kind: &pb.Value_StringValue{StringValue: "London"},
- },
- },
- },
- },
- },
- },
- },
- {
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 3},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.36, 0.55, 0.47, 0.94}}}},
- Payload: map[string]*pb.Value{
- "city": {
- Kind: &pb.Value_ListValue{
- ListValue: &pb.ListValue{
- Values: []*pb.Value{
- {
- Kind: &pb.Value_StringValue{StringValue: "Berlin"},
- },
- {
- Kind: &pb.Value_StringValue{StringValue: "Moscow"},
- },
- },
- },
- },
- },
- },
- },
- {
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 4},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.18, 0.01, 0.85, 0.80}}}},
- Payload: map[string]*pb.Value{
- "city": {
- Kind: &pb.Value_ListValue{
- ListValue: &pb.ListValue{
- Values: []*pb.Value{
- {
- Kind: &pb.Value_StringValue{StringValue: "London"},
- },
- {
- Kind: &pb.Value_StringValue{StringValue: "Moscow"},
- },
- },
- },
- },
- },
- },
- },
- {
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 5},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.24, 0.18, 0.22, 0.44}}}},
- Payload: map[string]*pb.Value{
- "count": {
- Kind: &pb.Value_ListValue{
- ListValue: &pb.ListValue{
- Values: []*pb.Value{
- {
- Kind: &pb.Value_IntegerValue{IntegerValue: 0},
- },
- },
- },
- },
- },
- },
- },
- {
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Num{Num: 6},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.35, 0.08, 0.11, 0.44}}}},
- Payload: map[string]*pb.Value{},
- },
- {
- Id: &pb.PointId{
- PointIdOptions: &pb.PointId_Uuid{Uuid: "58384991-3295-4e21-b711-fd3b94fa73e3"},
- },
- Vectors: &pb.Vectors{VectorsOptions: &pb.Vectors_Vector{Vector: &pb.Vector{Data: []float32{0.35, 0.08, 0.11, 0.44}}}},
- Payload: map[string]*pb.Value{},
- },
- }
- _, err = pointsClient.Upsert(ctx, &pb.UpsertPoints{
- CollectionName: collectionName,
- Wait: &waitUpsert,
- Points: upsertPoints,
- })
- if err != nil {
- t.Fatalf("Could not upsert points: %v", err)
- } else {
- log.Println("Upsert", len(upsertPoints), "points")
- }
-
- // Retrieve points by ids
- pointsById, err := pointsClient.Get(ctx, &pb.GetPoints{
- CollectionName: collectionName,
- Ids: []*pb.PointId{
- {PointIdOptions: &pb.PointId_Num{Num: 1}},
- {PointIdOptions: &pb.PointId_Num{Num: 2}},
- },
- })
- if err != nil {
- t.Fatalf("Could not retrieve points: %v", err)
- } else {
- log.Printf("Retrieved points: %s", pointsById.GetResult())
- }
- })
-
- t.Run("Unfiltered search", func(t *testing.T) {
- unfilteredSearchResult, err := pointsClient.Search(ctx, &pb.SearchPoints{
- CollectionName: collectionName,
- Vector: []float32{0.2, 0.1, 0.9, 0.7},
- Limit: 3,
- // Include all payload and vectors in the search result
- WithVectors: &pb.WithVectorsSelector{SelectorOptions: &pb.WithVectorsSelector_Enable{Enable: true}},
- WithPayload: &pb.WithPayloadSelector{SelectorOptions: &pb.WithPayloadSelector_Enable{Enable: true}},
- })
- if err != nil {
- t.Fatalf("Could not search points: %v", err)
- } else {
- log.Printf("Found points: %s", unfilteredSearchResult.GetResult())
- }
- })
-
- t.Run("Filtered search", func(t *testing.T) {
- filteredSearchResult, err := pointsClient.Search(ctx, &pb.SearchPoints{
- CollectionName: collectionName,
- Vector: []float32{0.2, 0.1, 0.9, 0.7},
- Limit: 3,
- Filter: &pb.Filter{
- Should: []*pb.Condition{
- {
- ConditionOneOf: &pb.Condition_Field{
- Field: &pb.FieldCondition{
- Key: "city",
- Match: &pb.Match{
- MatchValue: &pb.Match_Keyword{
- Keyword: "London",
- },
- },
- },
- },
- },
- },
- },
- })
- if err != nil {
- t.Fatalf("Could not search points: %v", err)
- } else {
- log.Printf("Found points: %s", filteredSearchResult.GetResult())
- }
- })
-}
diff --git a/qdrant/snapshots.go b/qdrant/snapshots.go
new file mode 100644
index 0000000..a9ec7f7
--- /dev/null
+++ b/qdrant/snapshots.go
@@ -0,0 +1,116 @@
+package qdrant
+
+import (
+ "context"
+)
+
+// Creates a snapshot of a specific collection.
+// Snapshots are read-only copies of the collection data, which can be used for backup and restore purposes.
+// The snapshot is created asynchronously and does not block the collection usage.
+//
+// Parameters:
+// - ctx: The context for the request
+// - collection: The name of the collection to create a snapshot for
+//
+// Returns:
+// - *SnapshotDescription: Description of the created snapshot
+// - error: Any error encountered during the snapshot creation
+func (c *Client) CreateSnapshot(ctx context.Context, collection string) (*SnapshotDescription, error) {
+ resp, err := c.GetSnapshotsClient().Create(ctx, &CreateSnapshotRequest{
+ CollectionName: collection,
+ })
+ if err != nil {
+ return nil, newQdrantErr(err, "CreateSnapshot")
+ }
+ return resp.GetSnapshotDescription(), nil
+}
+
+// Retrieves a list of all snapshots for a specific collection.
+//
+// Parameters:
+// - ctx: The context for the request
+// - collection: The name of the collection to list snapshots for
+//
+// Returns:
+// - []*SnapshotDescription: A slice of snapshot descriptions
+// - error: Any error encountered while listing snapshots
+func (c *Client) ListSnapshots(ctx context.Context, collection string) ([]*SnapshotDescription, error) {
+ resp, err := c.GetSnapshotsClient().List(ctx, &ListSnapshotsRequest{
+ CollectionName: collection,
+ })
+ if err != nil {
+ return nil, newQdrantErr(err, "ListSnapshots")
+ }
+ return resp.GetSnapshotDescriptions(), nil
+}
+
+// Removes a specific snapshot of a collection.
+//
+// Parameters:
+// - ctx: The context for the request
+// - collection: The name of the collection the snapshot belongs to
+// - snapshot: The name of the snapshot to delete
+//
+// Returns:
+// - error: Any error encountered while deleting the snapshot
+func (c *Client) DeleteSnapshot(ctx context.Context, collection string, snapshot string) error {
+ _, err := c.GetSnapshotsClient().Delete(ctx, &DeleteSnapshotRequest{
+ CollectionName: collection,
+ SnapshotName: snapshot,
+ })
+ if err != nil {
+ return newQdrantErr(err, "DeleteSnapshot")
+ }
+ return nil
+}
+
+// Creates a snapshot of the entire storage, including all collections.
+// This operation is useful for creating full backups of the Qdrant instance.
+//
+// Parameters:
+// - ctx: The context for the request
+//
+// Returns:
+// - *SnapshotDescription: Description of the created full snapshot
+// - error: Any error encountered during the full snapshot creation
+func (c *Client) CreateFullSnapshot(ctx context.Context) (*SnapshotDescription, error) {
+ resp, err := c.GetSnapshotsClient().CreateFull(ctx, &CreateFullSnapshotRequest{})
+ if err != nil {
+ return nil, newQdrantErr(err, "CreateFullSnapshot")
+ }
+ return resp.GetSnapshotDescription(), nil
+}
+
+// Retrieves a list of all full snapshots of the storage.
+//
+// Parameters:
+// - ctx: The context for the request
+//
+// Returns:
+// - []*SnapshotDescription: A slice of full snapshot descriptions
+// - error: Any error encountered while listing full snapshots
+func (c *Client) ListFullSnapshots(ctx context.Context) ([]*SnapshotDescription, error) {
+ resp, err := c.GetSnapshotsClient().ListFull(ctx, &ListFullSnapshotsRequest{})
+ if err != nil {
+ return nil, newQdrantErr(err, "ListFullSnapshots")
+ }
+ return resp.GetSnapshotDescriptions(), nil
+}
+
+// Removes a specific full snapshot of the storage.
+//
+// Parameters:
+// - ctx: The context for the request
+// - snapshot: The name of the full snapshot to delete
+//
+// Returns:
+// - error: Any error encountered while deleting the full snapshot
+func (c *Client) DeleteFullSnapshot(ctx context.Context, snapshot string) error {
+ _, err := c.GetSnapshotsClient().DeleteFull(ctx, &DeleteFullSnapshotRequest{
+ SnapshotName: snapshot,
+ })
+ if err != nil {
+ return newQdrantErr(err, "DeleteFullSnapshot")
+ }
+ return nil
+}
diff --git a/qdrant/snapshots_service.pb.go b/qdrant/snapshots_service.pb.go
index efc2c07..7fba7ef 100644
--- a/qdrant/snapshots_service.pb.go
+++ b/qdrant/snapshots_service.pb.go
@@ -1,10 +1,10 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// versions:
-// protoc-gen-go v1.26.0
-// protoc v4.22.2
+// protoc-gen-go v1.34.2
+// protoc v4.25.1
// source: snapshots_service.proto
-package go_client
+package qdrant
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
@@ -626,7 +626,7 @@ func file_snapshots_service_proto_rawDescGZIP() []byte {
}
var file_snapshots_service_proto_msgTypes = make([]protoimpl.MessageInfo, 10)
-var file_snapshots_service_proto_goTypes = []interface{}{
+var file_snapshots_service_proto_goTypes = []any{
(*CreateFullSnapshotRequest)(nil), // 0: qdrant.CreateFullSnapshotRequest
(*ListFullSnapshotsRequest)(nil), // 1: qdrant.ListFullSnapshotsRequest
(*DeleteFullSnapshotRequest)(nil), // 2: qdrant.DeleteFullSnapshotRequest
@@ -668,7 +668,7 @@ func file_snapshots_service_proto_init() {
return
}
if !protoimpl.UnsafeEnabled {
- file_snapshots_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[0].Exporter = func(v any, i int) any {
switch v := v.(*CreateFullSnapshotRequest); i {
case 0:
return &v.state
@@ -680,7 +680,7 @@ func file_snapshots_service_proto_init() {
return nil
}
}
- file_snapshots_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[1].Exporter = func(v any, i int) any {
switch v := v.(*ListFullSnapshotsRequest); i {
case 0:
return &v.state
@@ -692,7 +692,7 @@ func file_snapshots_service_proto_init() {
return nil
}
}
- file_snapshots_service_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[2].Exporter = func(v any, i int) any {
switch v := v.(*DeleteFullSnapshotRequest); i {
case 0:
return &v.state
@@ -704,7 +704,7 @@ func file_snapshots_service_proto_init() {
return nil
}
}
- file_snapshots_service_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[3].Exporter = func(v any, i int) any {
switch v := v.(*CreateSnapshotRequest); i {
case 0:
return &v.state
@@ -716,7 +716,7 @@ func file_snapshots_service_proto_init() {
return nil
}
}
- file_snapshots_service_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[4].Exporter = func(v any, i int) any {
switch v := v.(*ListSnapshotsRequest); i {
case 0:
return &v.state
@@ -728,7 +728,7 @@ func file_snapshots_service_proto_init() {
return nil
}
}
- file_snapshots_service_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[5].Exporter = func(v any, i int) any {
switch v := v.(*DeleteSnapshotRequest); i {
case 0:
return &v.state
@@ -740,7 +740,7 @@ func file_snapshots_service_proto_init() {
return nil
}
}
- file_snapshots_service_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[6].Exporter = func(v any, i int) any {
switch v := v.(*SnapshotDescription); i {
case 0:
return &v.state
@@ -752,7 +752,7 @@ func file_snapshots_service_proto_init() {
return nil
}
}
- file_snapshots_service_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[7].Exporter = func(v any, i int) any {
switch v := v.(*CreateSnapshotResponse); i {
case 0:
return &v.state
@@ -764,7 +764,7 @@ func file_snapshots_service_proto_init() {
return nil
}
}
- file_snapshots_service_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[8].Exporter = func(v any, i int) any {
switch v := v.(*ListSnapshotsResponse); i {
case 0:
return &v.state
@@ -776,7 +776,7 @@ func file_snapshots_service_proto_init() {
return nil
}
}
- file_snapshots_service_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
+ file_snapshots_service_proto_msgTypes[9].Exporter = func(v any, i int) any {
switch v := v.(*DeleteSnapshotResponse); i {
case 0:
return &v.state
@@ -789,7 +789,7 @@ func file_snapshots_service_proto_init() {
}
}
}
- file_snapshots_service_proto_msgTypes[6].OneofWrappers = []interface{}{}
+ file_snapshots_service_proto_msgTypes[6].OneofWrappers = []any{}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
diff --git a/qdrant/snapshots_service_grpc.pb.go b/qdrant/snapshots_service_grpc.pb.go
index ffe2d74..b993888 100644
--- a/qdrant/snapshots_service_grpc.pb.go
+++ b/qdrant/snapshots_service_grpc.pb.go
@@ -1,22 +1,29 @@
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
-// - protoc-gen-go-grpc v1.2.0
-// - protoc v4.22.2
+// - protoc-gen-go-grpc v1.5.1
+// - protoc v4.25.1
// source: snapshots_service.proto
-package go_client
+package qdrant
import (
context "context"
grpc "google.golang.org/grpc"
- codes "google.golang.org/grpc/codes"
- status "google.golang.org/grpc/status"
)
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
-// Requires gRPC-Go v1.32.0 or later.
-const _ = grpc.SupportPackageIsVersion7
+// Requires gRPC-Go v1.64.0 or later.
+const _ = grpc.SupportPackageIsVersion9
+
+const (
+ Snapshots_Create_FullMethodName = "/qdrant.Snapshots/Create"
+ Snapshots_List_FullMethodName = "/qdrant.Snapshots/List"
+ Snapshots_Delete_FullMethodName = "/qdrant.Snapshots/Delete"
+ Snapshots_CreateFull_FullMethodName = "/qdrant.Snapshots/CreateFull"
+ Snapshots_ListFull_FullMethodName = "/qdrant.Snapshots/ListFull"
+ Snapshots_DeleteFull_FullMethodName = "/qdrant.Snapshots/DeleteFull"
+)
// SnapshotsClient is the client API for Snapshots service.
//
@@ -45,8 +52,9 @@ func NewSnapshotsClient(cc grpc.ClientConnInterface) SnapshotsClient {
}
func (c *snapshotsClient) Create(ctx context.Context, in *CreateSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreateSnapshotResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Snapshots/Create", in, out, opts...)
+ err := c.cc.Invoke(ctx, Snapshots_Create_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -54,8 +62,9 @@ func (c *snapshotsClient) Create(ctx context.Context, in *CreateSnapshotRequest,
}
func (c *snapshotsClient) List(ctx context.Context, in *ListSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListSnapshotsResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Snapshots/List", in, out, opts...)
+ err := c.cc.Invoke(ctx, Snapshots_List_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -63,8 +72,9 @@ func (c *snapshotsClient) List(ctx context.Context, in *ListSnapshotsRequest, op
}
func (c *snapshotsClient) Delete(ctx context.Context, in *DeleteSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteSnapshotResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Snapshots/Delete", in, out, opts...)
+ err := c.cc.Invoke(ctx, Snapshots_Delete_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -72,8 +82,9 @@ func (c *snapshotsClient) Delete(ctx context.Context, in *DeleteSnapshotRequest,
}
func (c *snapshotsClient) CreateFull(ctx context.Context, in *CreateFullSnapshotRequest, opts ...grpc.CallOption) (*CreateSnapshotResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(CreateSnapshotResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Snapshots/CreateFull", in, out, opts...)
+ err := c.cc.Invoke(ctx, Snapshots_CreateFull_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -81,8 +92,9 @@ func (c *snapshotsClient) CreateFull(ctx context.Context, in *CreateFullSnapshot
}
func (c *snapshotsClient) ListFull(ctx context.Context, in *ListFullSnapshotsRequest, opts ...grpc.CallOption) (*ListSnapshotsResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(ListSnapshotsResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Snapshots/ListFull", in, out, opts...)
+ err := c.cc.Invoke(ctx, Snapshots_ListFull_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
@@ -90,208 +102,11 @@ func (c *snapshotsClient) ListFull(ctx context.Context, in *ListFullSnapshotsReq
}
func (c *snapshotsClient) DeleteFull(ctx context.Context, in *DeleteFullSnapshotRequest, opts ...grpc.CallOption) (*DeleteSnapshotResponse, error) {
+ cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...)
out := new(DeleteSnapshotResponse)
- err := c.cc.Invoke(ctx, "/qdrant.Snapshots/DeleteFull", in, out, opts...)
+ err := c.cc.Invoke(ctx, Snapshots_DeleteFull_FullMethodName, in, out, cOpts...)
if err != nil {
return nil, err
}
return out, nil
}
-
-// SnapshotsServer is the server API for Snapshots service.
-// All implementations must embed UnimplementedSnapshotsServer
-// for forward compatibility
-type SnapshotsServer interface {
- // Create collection snapshot
- Create(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error)
- // List collection snapshots
- List(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error)
- // Delete collection snapshot
- Delete(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error)
- // Create full storage snapshot
- CreateFull(context.Context, *CreateFullSnapshotRequest) (*CreateSnapshotResponse, error)
- // List full storage snapshots
- ListFull(context.Context, *ListFullSnapshotsRequest) (*ListSnapshotsResponse, error)
- // Delete full storage snapshot
- DeleteFull(context.Context, *DeleteFullSnapshotRequest) (*DeleteSnapshotResponse, error)
- mustEmbedUnimplementedSnapshotsServer()
-}
-
-// UnimplementedSnapshotsServer must be embedded to have forward compatible implementations.
-type UnimplementedSnapshotsServer struct {
-}
-
-func (UnimplementedSnapshotsServer) Create(context.Context, *CreateSnapshotRequest) (*CreateSnapshotResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Create not implemented")
-}
-func (UnimplementedSnapshotsServer) List(context.Context, *ListSnapshotsRequest) (*ListSnapshotsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method List not implemented")
-}
-func (UnimplementedSnapshotsServer) Delete(context.Context, *DeleteSnapshotRequest) (*DeleteSnapshotResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented")
-}
-func (UnimplementedSnapshotsServer) CreateFull(context.Context, *CreateFullSnapshotRequest) (*CreateSnapshotResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method CreateFull not implemented")
-}
-func (UnimplementedSnapshotsServer) ListFull(context.Context, *ListFullSnapshotsRequest) (*ListSnapshotsResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method ListFull not implemented")
-}
-func (UnimplementedSnapshotsServer) DeleteFull(context.Context, *DeleteFullSnapshotRequest) (*DeleteSnapshotResponse, error) {
- return nil, status.Errorf(codes.Unimplemented, "method DeleteFull not implemented")
-}
-func (UnimplementedSnapshotsServer) mustEmbedUnimplementedSnapshotsServer() {}
-
-// UnsafeSnapshotsServer may be embedded to opt out of forward compatibility for this service.
-// Use of this interface is not recommended, as added methods to SnapshotsServer will
-// result in compilation errors.
-type UnsafeSnapshotsServer interface {
- mustEmbedUnimplementedSnapshotsServer()
-}
-
-func RegisterSnapshotsServer(s grpc.ServiceRegistrar, srv SnapshotsServer) {
- s.RegisterService(&Snapshots_ServiceDesc, srv)
-}
-
-func _Snapshots_Create_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateSnapshotRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SnapshotsServer).Create(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Snapshots/Create",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SnapshotsServer).Create(ctx, req.(*CreateSnapshotRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Snapshots_List_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListSnapshotsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SnapshotsServer).List(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Snapshots/List",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SnapshotsServer).List(ctx, req.(*ListSnapshotsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Snapshots_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteSnapshotRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SnapshotsServer).Delete(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Snapshots/Delete",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SnapshotsServer).Delete(ctx, req.(*DeleteSnapshotRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Snapshots_CreateFull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(CreateFullSnapshotRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SnapshotsServer).CreateFull(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Snapshots/CreateFull",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SnapshotsServer).CreateFull(ctx, req.(*CreateFullSnapshotRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Snapshots_ListFull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(ListFullSnapshotsRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SnapshotsServer).ListFull(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Snapshots/ListFull",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SnapshotsServer).ListFull(ctx, req.(*ListFullSnapshotsRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-func _Snapshots_DeleteFull_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
- in := new(DeleteFullSnapshotRequest)
- if err := dec(in); err != nil {
- return nil, err
- }
- if interceptor == nil {
- return srv.(SnapshotsServer).DeleteFull(ctx, in)
- }
- info := &grpc.UnaryServerInfo{
- Server: srv,
- FullMethod: "/qdrant.Snapshots/DeleteFull",
- }
- handler := func(ctx context.Context, req interface{}) (interface{}, error) {
- return srv.(SnapshotsServer).DeleteFull(ctx, req.(*DeleteFullSnapshotRequest))
- }
- return interceptor(ctx, in, info, handler)
-}
-
-// Snapshots_ServiceDesc is the grpc.ServiceDesc for Snapshots service.
-// It's only intended for direct use with grpc.RegisterService,
-// and not to be introspected or modified (even as a copy)
-var Snapshots_ServiceDesc = grpc.ServiceDesc{
- ServiceName: "qdrant.Snapshots",
- HandlerType: (*SnapshotsServer)(nil),
- Methods: []grpc.MethodDesc{
- {
- MethodName: "Create",
- Handler: _Snapshots_Create_Handler,
- },
- {
- MethodName: "List",
- Handler: _Snapshots_List_Handler,
- },
- {
- MethodName: "Delete",
- Handler: _Snapshots_Delete_Handler,
- },
- {
- MethodName: "CreateFull",
- Handler: _Snapshots_CreateFull_Handler,
- },
- {
- MethodName: "ListFull",
- Handler: _Snapshots_ListFull_Handler,
- },
- {
- MethodName: "DeleteFull",
- Handler: _Snapshots_DeleteFull_Handler,
- },
- },
- Streams: []grpc.StreamDesc{},
- Metadata: "snapshots_service.proto",
-}
diff --git a/qdrant/value_map.go b/qdrant/value_map.go
new file mode 100644
index 0000000..4d244ed
--- /dev/null
+++ b/qdrant/value_map.go
@@ -0,0 +1,180 @@
+// This file contains methods to convert a generic map to a map of string to *grpc.Value (Qdrant's payload type).
+// This is a custom implementation based on "google.golang.org/protobuf/types/known/structpb".
+// It extends the original implementation to support IntegerValue and DoubleValue instead of a single NumberValue.
+// https://github.com/qdrant/qdrant/blob/master/lib/api/src/grpc/proto/json_with_int.proto
+//
+// USAGE:
+//
+// jsonMap := map[string]any{
+// "some_null": nil,
+// "some_bool": true,
+// "some_int": 42,
+// "some_float": 3.14,
+// "some_string": "hello",
+// "some_bytes": []byte("world"),
+// "some_nested": map[string]any{"key": "value"},
+// "some_list": []any{"foo", 32},
+// }
+//
+// valueMap := NewValueMap(jsonMap)
+
+package qdrant
+
+import (
+ "encoding/base64"
+ "fmt"
+ "unicode/utf8"
+)
+
+// Converts a map of string to any to a map of string to *grpc.Value
+// NOTE: This function panics if the conversion fails. Use TryValueMap() to have errors returned.
+//
+// ββββββββββββββββββββββββββ€βββββββββββββββββββββββββββββββββββββββββββββ
+// β Go type β Conversion β
+// β βββββββββββββββββββββββββͺβββββββββββββββββββββββββββββββββββββββββββββ£
+// β nil β stored as NullValue β
+// β bool β stored as BoolValue β
+// β int, int32, int64 β stored as IntegerValue β
+// β uint, uint32, uint64 β stored as IntegerValue β
+// β float32, float64 β stored as DoubleValue β
+// β string β stored as StringValue; must be valid UTF-8 β
+// β []byte β stored as StringValue; base64-encoded β
+// β map[string]interface{} β stored as StructValue β
+// β []interface{} β stored as ListValue β
+// ββββββββββββββββββββββββββ§βββββββββββββββββββββββββββββββββββββββββββββ
+func NewValueMap(inputMap map[string]any) map[string]*Value {
+ valueMap, err := TryValueMap(inputMap)
+ if err != nil {
+ panic(err)
+ }
+ return valueMap
+}
+
+// Converts a map of string to any to a map of string to *grpc.Value
+// Returns an error if the conversion fails.
+func TryValueMap(inputMap map[string]any) (map[string]*Value, error) {
+ valueMap := make(map[string]*Value)
+ for key, val := range inputMap {
+ value, err := NewValue(val)
+ if err != nil {
+ return nil, err
+ }
+ valueMap[key] = value
+ }
+ return valueMap, nil
+}
+
+// Constructs a *Value from a generic Go interface.
+func NewValue(v any) (*Value, error) {
+ switch v := v.(type) {
+ case nil:
+ return NewValueNull(), nil
+ case bool:
+ return NewValueBool(v), nil
+ case int:
+ return NewValueInt(int64(v)), nil
+ case int32:
+ return NewValueInt(int64(v)), nil
+ case int64:
+ return NewValueInt(v), nil
+ case uint:
+ return NewValueInt(int64(v)), nil
+ case uint32:
+ return NewValueInt(int64(v)), nil
+ case uint64:
+ return NewValueInt(int64(v)), nil
+ case float32:
+ return NewValueDouble(float64(v)), nil
+ case float64:
+ return NewValueDouble(float64(v)), nil
+ case string:
+ if !utf8.ValidString(v) {
+ return nil, fmt.Errorf("invalid UTF-8 in string: %q", v)
+ }
+ return NewValueString(v), nil
+ case []byte:
+ s := base64.StdEncoding.EncodeToString(v)
+ return NewValueString(s), nil
+ case map[string]interface{}:
+ v2, err := NewStruct(v)
+ if err != nil {
+ return nil, err
+ }
+ return NewValueStruct(v2), nil
+ case []interface{}:
+ v2, err := NewListValue(v)
+ if err != nil {
+ return nil, err
+ }
+ return NewValueList(v2), nil
+ default:
+ return nil, fmt.Errorf("invalid type: %T", v)
+ }
+}
+
+// Constructs a new null Value.
+func NewValueNull() *Value {
+ return &Value{Kind: &Value_NullValue{NullValue: NullValue_NULL_VALUE}}
+}
+
+// Constructs a new boolean Value.
+func NewValueBool(v bool) *Value {
+ return &Value{Kind: &Value_BoolValue{BoolValue: v}}
+}
+
+// Constructs a new integer Value.
+func NewValueInt(v int64) *Value {
+ return &Value{Kind: &Value_IntegerValue{IntegerValue: v}}
+}
+
+// Constructs a new double Value.
+func NewValueDouble(v float64) *Value {
+ return &Value{Kind: &Value_DoubleValue{DoubleValue: v}}
+}
+
+// Constructs a new string Value.
+func NewValueString(v string) *Value {
+ return &Value{Kind: &Value_StringValue{StringValue: v}}
+}
+
+// Constructs a new struct Value.
+func NewValueStruct(v *Struct) *Value {
+ return &Value{Kind: &Value_StructValue{StructValue: v}}
+}
+
+// Constructs a new list Value.
+func NewValueList(v *ListValue) *Value {
+ return &Value{Kind: &Value_ListValue{ListValue: v}}
+}
+
+// Constructs a ListValue from a general-purpose Go slice.
+// The slice elements are converted using NewValue().
+func NewListValue(v []interface{}) (*ListValue, error) {
+ x := &ListValue{Values: make([]*Value, len(v))}
+ for i, v := range v {
+ var err error
+ x.Values[i], err = NewValue(v)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return x, nil
+}
+
+// Constructs a Struct from a general-purpose Go map.
+// The map keys must be valid UTF-8.
+// The map values are converted using NewValue().
+func NewStruct(v map[string]interface{}) (*Struct, error) {
+ x := &Struct{Fields: make(map[string]*Value, len(v))}
+ for k, v := range v {
+ if !utf8.ValidString(k) {
+ return nil, fmt.Errorf("invalid UTF-8 in string: %q", k)
+ }
+ var err error
+ x.Fields[k], err = NewValue(v)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return x, nil
+}
diff --git a/qdrant_test/collections_test.go b/qdrant_test/collections_test.go
new file mode 100644
index 0000000..58a2943
--- /dev/null
+++ b/qdrant_test/collections_test.go
@@ -0,0 +1,188 @@
+package qdrant_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/qdrant/go-client/qdrant"
+ "github.com/stretchr/testify/require"
+)
+
+func TestCollectionsClient(t *testing.T) {
+ collectionName := t.Name()
+ vectorSize := uint64(384)
+ distance := qdrant.Distance_Cosine
+ apiKey := ""
+
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ container, err := distributedQdrant(ctx, apiKey)
+ require.NoError(t, err)
+
+ err = container.Start(ctx)
+ require.NoError(t, err)
+
+ t.Cleanup(func() {
+ err := container.Terminate(ctx)
+ require.NoError(t, err)
+ })
+ host, err := container.Host(ctx)
+ require.NoError(t, err)
+
+ port, err := container.MappedPort(ctx, "6334/tcp")
+ require.NoError(t, err)
+
+ client, err := qdrant.NewClient(&qdrant.Config{
+ Host: host,
+ Port: port.Int(),
+ APIKey: apiKey,
+ })
+ require.NoError(t, err)
+
+ t.Run("CreateCollection", func(t *testing.T) {
+ err := client.CreateCollection(ctx, &qdrant.CreateCollection{
+ CollectionName: collectionName,
+ VectorsConfig: qdrant.NewVectorsConfig(&qdrant.VectorParams{
+ Size: vectorSize,
+ Distance: distance,
+ }),
+ ShardingMethod: qdrant.ShardingMethod_Custom.Enum(),
+ })
+ require.NoError(t, err)
+
+ _, err = client.GetCollectionInfo(ctx, collectionName)
+ require.NoError(t, err)
+ })
+
+ t.Run("CollectionExists", func(t *testing.T) {
+ exists, err := client.CollectionExists(ctx, collectionName)
+ require.NoError(t, err)
+ require.True(t, exists)
+ })
+
+ t.Run("GetCollection", func(t *testing.T) {
+ collInfo, err := client.GetCollectionInfo(ctx, collectionName)
+ require.NoError(t, err)
+ require.Zero(t, collInfo.GetPointsCount())
+ })
+
+ t.Run("ListCollections", func(t *testing.T) {
+ collections, err := client.ListCollections(ctx)
+ require.NoError(t, err)
+ require.Contains(t, collections, collectionName)
+ })
+
+ t.Run("UpdateCollection", func(t *testing.T) {
+ threshold := uint64(1000)
+ err := client.UpdateCollection(ctx, &qdrant.UpdateCollection{
+ CollectionName: collectionName,
+ OptimizersConfig: &qdrant.OptimizersConfigDiff{
+ IndexingThreshold: &threshold,
+ },
+ })
+ require.NoError(t, err)
+
+ collInfo, err := client.GetCollectionInfo(ctx, collectionName)
+ require.NoError(t, err)
+ require.Equal(t, threshold, collInfo.GetConfig().GetOptimizerConfig().GetIndexingThreshold())
+ })
+
+ t.Run("AliasOperations", func(t *testing.T) {
+ aliasName := "test_alias"
+ newAliasName := "new_test_alias"
+
+ t.Run("CreateAlias", func(t *testing.T) {
+ err := client.CreateAlias(ctx, aliasName, collectionName)
+ require.NoError(t, err)
+
+ aliases, err := client.ListCollectionAliases(ctx, collectionName)
+ require.NoError(t, err)
+ require.Contains(t, aliases, aliasName)
+ })
+
+ t.Run("ListCollectionAliases", func(t *testing.T) {
+ aliases, err := client.ListCollectionAliases(ctx, collectionName)
+ require.NoError(t, err)
+ require.Contains(t, aliases, aliasName)
+ })
+
+ t.Run("ListAliases", func(t *testing.T) {
+ allAliases, err := client.ListAliases(ctx)
+ require.NoError(t, err)
+ require.NotEmpty(t, allAliases)
+ })
+
+ t.Run("RenameAlias", func(t *testing.T) {
+ err := client.RenameAlias(ctx, aliasName, newAliasName)
+ require.NoError(t, err)
+
+ aliases, err := client.ListCollectionAliases(ctx, collectionName)
+ require.NoError(t, err)
+ require.Contains(t, aliases, newAliasName)
+ require.NotContains(t, aliases, aliasName)
+ })
+
+ t.Run("DeleteAlias", func(t *testing.T) {
+ err := client.DeleteAlias(ctx, newAliasName)
+ require.NoError(t, err)
+
+ aliases, err := client.ListCollectionAliases(ctx, collectionName)
+ require.NoError(t, err)
+ require.NotContains(t, aliases, newAliasName)
+ })
+ })
+
+ t.Run("ShardKeyOperations", func(t *testing.T) {
+ shardKey := "test_shard_key"
+
+ t.Run("CreateShardKey", func(t *testing.T) {
+ err := client.CreateShardKey(ctx, collectionName, &qdrant.CreateShardKey{
+ ShardKey: qdrant.NewShardKey(shardKey),
+ })
+ require.NoError(t, err)
+ })
+
+ t.Run("DeleteShardKey", func(t *testing.T) {
+ err := client.DeleteShardKey(ctx, collectionName, &qdrant.DeleteShardKey{
+ ShardKey: qdrant.NewShardKey(shardKey),
+ })
+ require.NoError(t, err)
+ })
+ })
+
+ t.Run("DeleteCollection", func(t *testing.T) {
+ err := client.DeleteCollection(ctx, collectionName)
+ require.NoError(t, err)
+
+ exists, err := client.CollectionExists(ctx, collectionName)
+ require.NoError(t, err)
+ require.False(t, exists)
+ })
+
+ t.Run("CreateCollectionWithInvalidParams", func(t *testing.T) {
+ err := client.CreateCollection(ctx, &qdrant.CreateCollection{
+ CollectionName: "",
+ VectorsConfig: qdrant.NewVectorsConfig(&qdrant.VectorParams{
+ Size: 0,
+ Distance: qdrant.Distance_Cosine,
+ }),
+ })
+ require.Error(t, err)
+ })
+
+ t.Run("UpdateNonExistentCollection", func(t *testing.T) {
+ err := client.UpdateCollection(ctx, &qdrant.UpdateCollection{
+ CollectionName: "non_existent_collection",
+ OptimizersConfig: &qdrant.OptimizersConfigDiff{
+ IndexingThreshold: new(uint64),
+ },
+ })
+ require.Error(t, err)
+ })
+
+ t.Run("DeleteNonExistentCollection", func(t *testing.T) {
+ err := client.DeleteCollection(ctx, "non_existent_collection")
+ require.Error(t, err)
+ })
+}
diff --git a/qdrant_test/image_test.go b/qdrant_test/image_test.go
new file mode 100644
index 0000000..b1787e5
--- /dev/null
+++ b/qdrant_test/image_test.go
@@ -0,0 +1,51 @@
+package qdrant_test
+
+import (
+ "context"
+ "time"
+
+ "github.com/testcontainers/testcontainers-go"
+ "github.com/testcontainers/testcontainers-go/wait"
+)
+
+const TestImage string = "qdrant/qdrant:v1.11.3"
+
+// We use an instance with distributed mode enabled
+// to test methods like CreateShardKey(), DeleteShardKey().
+func distributedQdrant(ctx context.Context, apiKey string) (testcontainers.Container, error) {
+ req := testcontainers.ContainerRequest{
+ Image: TestImage,
+ ExposedPorts: []string{"6334/tcp"},
+ Env: map[string]string{
+ "QDRANT__CLUSTER__ENABLED": "true",
+ "QDRANT__SERVICE__API_KEY": apiKey,
+ },
+ Cmd: []string{"./qdrant", "--uri", "http://qdrant_node_1:6335"},
+ WaitingFor: wait.ForAll(
+ wait.ForListeningPort("6334/tcp").WithStartupTimeout(5 * time.Second),
+ ),
+ }
+ container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
+ ContainerRequest: req,
+ })
+
+ return container, err
+}
+
+func standaloneQdrant(ctx context.Context, apiKey string) (testcontainers.Container, error) {
+ req := testcontainers.ContainerRequest{
+ Image: TestImage,
+ ExposedPorts: []string{"6334/tcp"},
+ Env: map[string]string{
+ "QDRANT__SERVICE__API_KEY": apiKey,
+ },
+ WaitingFor: wait.ForAll(
+ wait.ForListeningPort("6334/tcp").WithStartupTimeout(5 * time.Second),
+ ),
+ }
+ container, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
+ ContainerRequest: req,
+ })
+
+ return container, err
+}
diff --git a/qdrant_test/points_test.go b/qdrant_test/points_test.go
new file mode 100644
index 0000000..7879add
--- /dev/null
+++ b/qdrant_test/points_test.go
@@ -0,0 +1,297 @@
+package qdrant_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/qdrant/go-client/qdrant"
+ "github.com/stretchr/testify/require"
+)
+
+func TestPointsClient(t *testing.T) {
+ collectionName := t.Name()
+ vectorSize := uint64(4)
+ distance := qdrant.Distance_Cosine
+ apiKey := ""
+
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ container, err := standaloneQdrant(ctx, apiKey)
+ require.NoError(t, err)
+
+ err = container.Start(ctx)
+ require.NoError(t, err)
+
+ t.Cleanup(func() {
+ err := container.Terminate(ctx)
+ require.NoError(t, err)
+ })
+ host, err := container.Host(ctx)
+ require.NoError(t, err)
+
+ port, err := container.MappedPort(ctx, "6334/tcp")
+ require.NoError(t, err)
+
+ client, err := qdrant.NewClient(&qdrant.Config{
+ Host: host,
+ Port: port.Int(),
+ APIKey: apiKey,
+ })
+ require.NoError(t, err)
+
+ err = client.CreateCollection(ctx, &qdrant.CreateCollection{
+ CollectionName: collectionName,
+ VectorsConfig: qdrant.NewVectorsConfig(&qdrant.VectorParams{
+ Size: vectorSize,
+ Distance: distance,
+ }),
+ })
+ require.NoError(t, err)
+
+ testPointID := qdrant.NewID("ed7ac159-d8a7-41fb-9da3-66a14916330f")
+ wait := true
+
+ t.Run("UpsertPoints", func(t *testing.T) {
+ points := []*qdrant.PointStruct{
+ {
+ Id: testPointID,
+ Vectors: qdrant.NewVectors(0.1, 0.2, 0.3, 0.4),
+ },
+ }
+ res, err := client.Upsert(ctx, &qdrant.UpsertPoints{
+ CollectionName: collectionName,
+ Points: points,
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+
+ // Test with invalid vector size
+ points[0].Vectors = qdrant.NewVectors(0.1, 0.2)
+ res, err = client.Upsert(ctx, &qdrant.UpsertPoints{
+ CollectionName: collectionName,
+ Points: points,
+ Wait: &wait,
+ })
+ require.Error(t, err)
+ require.Nil(t, res)
+ })
+
+ t.Run("GetPoints", func(t *testing.T) {
+ points, err := client.Get(ctx, &qdrant.GetPoints{
+ CollectionName: collectionName,
+ Ids: []*qdrant.PointId{
+ testPointID,
+ },
+ })
+ require.NoError(t, err)
+ require.Len(t, points, 1)
+
+ // Test with non-existent point ID
+ points, err = client.Get(ctx, &qdrant.GetPoints{
+ CollectionName: collectionName,
+ Ids: []*qdrant.PointId{
+ qdrant.NewIDNum(423),
+ },
+ })
+ require.NoError(t, err)
+ require.Empty(t, points)
+ })
+
+ t.Run("CountPoints", func(t *testing.T) {
+ count, err := client.Count(ctx, &qdrant.CountPoints{
+ CollectionName: collectionName,
+ })
+ require.NoError(t, err)
+ require.Equal(t, uint64(1), count)
+ })
+
+ t.Run("ScrollPoints", func(t *testing.T) {
+ points, err := client.Scroll(ctx, &qdrant.ScrollPoints{
+ CollectionName: collectionName,
+ })
+ require.NoError(t, err)
+ require.Len(t, points, 1)
+ })
+
+ t.Run("UpdateVectors", func(t *testing.T) {
+ points := []*qdrant.PointVectors{
+ {
+ Id: testPointID,
+ Vectors: qdrant.NewVectors(0.4, 0.5, 0.6, 0.7),
+ },
+ }
+ res, err := client.UpdateVectors(ctx, &qdrant.UpdatePointVectors{
+ CollectionName: collectionName,
+ Points: points,
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+
+ // Test with invalid vector size
+ points[0].Vectors = qdrant.NewVectors(0.1, 0.2)
+ res, err = client.UpdateVectors(ctx, &qdrant.UpdatePointVectors{
+ CollectionName: collectionName,
+ Points: points,
+ Wait: &wait,
+ })
+ require.Error(t, err)
+ require.Nil(t, res)
+ })
+
+ t.Run("QueryPoints", func(t *testing.T) {
+ res, err := client.Query(ctx, &qdrant.QueryPoints{
+ CollectionName: collectionName,
+ Query: qdrant.NewQuery(0.1, 0.2, 0.3, 0.4),
+ })
+ require.NoError(t, err)
+ require.Len(t, res, 1)
+
+ // Test with invalid query vector size
+ res, err = client.Query(ctx, &qdrant.QueryPoints{
+ CollectionName: collectionName,
+ Query: qdrant.NewQuery(0.1, 0.2),
+ })
+ require.Error(t, err)
+ require.Nil(t, res)
+ })
+
+ t.Run("QueryBatchPoints", func(t *testing.T) {
+ res, err := client.QueryBatch(ctx, &qdrant.QueryBatchPoints{
+ CollectionName: collectionName,
+ QueryPoints: []*qdrant.QueryPoints{
+ {
+ CollectionName: collectionName,
+ Query: qdrant.NewQuery(0.1, 0.2, 0.3, 0.4),
+ },
+ },
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ t.Run("QueryGroups", func(t *testing.T) {
+ groups, err := client.QueryGroups(ctx, &qdrant.QueryPointGroups{
+ CollectionName: collectionName,
+ Query: qdrant.NewQuery(0.1, 0.2, 0.3, 0.4),
+ GroupBy: "key",
+ })
+ require.NoError(t, err)
+ require.Empty(t, groups)
+ })
+
+ t.Run("DeleteVectors", func(t *testing.T) {
+ res, err := client.DeleteVectors(ctx, &qdrant.DeletePointVectors{
+ CollectionName: collectionName,
+ Vectors: &qdrant.VectorsSelector{
+ // Delete the default/unnamed vector that we're using
+ Names: []string{""},
+ },
+ PointsSelector: qdrant.NewPointsSelector(testPointID),
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ t.Run("SetPayload", func(t *testing.T) {
+ res, err := client.SetPayload(ctx, &qdrant.SetPayloadPoints{
+ CollectionName: collectionName,
+ PointsSelector: qdrant.NewPointsSelector(testPointID),
+ Payload: qdrant.NewValueMap(map[string]any{
+ "key": "value",
+ "key_2": 32,
+ "key_3": false,
+ }),
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ t.Run("OverwritePayload", func(t *testing.T) {
+ res, err := client.OverwritePayload(ctx, &qdrant.SetPayloadPoints{
+ CollectionName: collectionName,
+ PointsSelector: qdrant.NewPointsSelector(testPointID),
+ Payload: qdrant.NewValueMap(map[string]any{
+ "key": 10,
+ }),
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ t.Run("DeletePayload", func(t *testing.T) {
+ res, err := client.DeletePayload(ctx, &qdrant.DeletePayloadPoints{
+ CollectionName: collectionName,
+ PointsSelector: qdrant.NewPointsSelector(testPointID),
+ Keys: []string{"key"},
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ t.Run("ClearPayload", func(t *testing.T) {
+ res, err := client.ClearPayload(ctx, &qdrant.ClearPayloadPoints{
+ CollectionName: collectionName,
+ Points: qdrant.NewPointsSelector(testPointID),
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ t.Run("CreateFieldIndex", func(t *testing.T) {
+ res, err := client.CreateFieldIndex(ctx, &qdrant.CreateFieldIndexCollection{
+ CollectionName: collectionName,
+ FieldName: "key",
+ FieldType: qdrant.FieldType_FieldTypeKeyword.Enum(),
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ t.Run("DeleteFieldIndex", func(t *testing.T) {
+ res, err := client.DeleteFieldIndex(ctx, &qdrant.DeleteFieldIndexCollection{
+ CollectionName: collectionName,
+ FieldName: "key",
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ t.Run("UpdateBatch", func(t *testing.T) {
+ ops := []*qdrant.PointsUpdateOperation{
+ qdrant.NewPointsUpdateDeletePayload(&qdrant.PointsUpdateOperation_DeletePayload{
+ Keys: []string{"key"},
+ PointsSelector: qdrant.NewPointsSelector(testPointID),
+ }),
+ }
+ res, err := client.UpdateBatch(ctx, &qdrant.UpdateBatchPoints{
+ CollectionName: collectionName,
+ Operations: ops,
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ t.Run("DeletePoints", func(t *testing.T) {
+ res, err := client.Delete(ctx, &qdrant.DeletePoints{
+ CollectionName: collectionName,
+ Points: qdrant.NewPointsSelector(testPointID),
+ Wait: &wait,
+ })
+ require.NoError(t, err)
+ require.NotNil(t, res)
+ })
+
+ err = client.DeleteCollection(ctx, collectionName)
+ require.NoError(t, err)
+}
diff --git a/qdrant_test/qdrant_test.go b/qdrant_test/qdrant_test.go
new file mode 100644
index 0000000..646b99d
--- /dev/null
+++ b/qdrant_test/qdrant_test.go
@@ -0,0 +1,43 @@
+package qdrant_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/qdrant/go-client/qdrant"
+ "github.com/stretchr/testify/require"
+)
+
+func TestHealthCheck(t *testing.T) {
+ apiKey := ""
+
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ container, err := standaloneQdrant(ctx, apiKey)
+ require.NoError(t, err)
+
+ err = container.Start(ctx)
+ require.NoError(t, err)
+
+ t.Cleanup(func() {
+ err := container.Terminate(ctx)
+ require.NoError(t, err)
+ })
+ host, err := container.Host(ctx)
+ require.NoError(t, err)
+
+ port, err := container.MappedPort(ctx, "6334/tcp")
+ require.NoError(t, err)
+
+ client, err := qdrant.NewClient(&qdrant.Config{
+ Host: host,
+ Port: port.Int(),
+ APIKey: apiKey,
+ })
+ require.NoError(t, err)
+
+ resp, err := client.HealthCheck(ctx)
+ require.NoError(t, err)
+ require.NotNil(t, resp)
+}
diff --git a/qdrant_test/snapshots_test.go b/qdrant_test/snapshots_test.go
new file mode 100644
index 0000000..b11533f
--- /dev/null
+++ b/qdrant_test/snapshots_test.go
@@ -0,0 +1,94 @@
+package qdrant_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/qdrant/go-client/qdrant"
+ "github.com/stretchr/testify/require"
+)
+
+func TestSnapshotsClient(t *testing.T) { // exercises the collection-snapshot and full-snapshot lifecycle against a containerized Qdrant
+ collectionName := t.Name() // collection named after the test for easy attribution
+ apiKey := "" // no authentication configured on the test container
+
+ ctx, cancel := context.WithCancel(context.Background())
+ t.Cleanup(cancel)
+
+ container, err := standaloneQdrant(ctx, apiKey) // helper defined elsewhere in this package
+ require.NoError(t, err)
+
+ err = container.Start(ctx)
+ require.NoError(t, err)
+
+ t.Cleanup(func() { // registered after cancel, so Terminate runs before ctx is cancelled (Cleanup is LIFO)
+ err := container.Terminate(ctx)
+ require.NoError(t, err)
+ })
+ host, err := container.Host(ctx)
+ require.NoError(t, err)
+
+ port, err := container.MappedPort(ctx, "6334/tcp") // 6334 is Qdrant's gRPC port; mapped to a random host port
+ require.NoError(t, err)
+
+ client, err := qdrant.NewClient(&qdrant.Config{
+ Host: host,
+ Port: port.Int(),
+ APIKey: apiKey,
+ })
+ require.NoError(t, err)
+
+ err = client.CreateCollection(ctx, &qdrant.CreateCollection{ // snapshots require an existing collection
+ CollectionName: collectionName,
+ VectorsConfig: qdrant.NewVectorsConfig(&qdrant.VectorParams{
+ Size: 4,
+ Distance: qdrant.Distance_Cosine,
+ }),
+ })
+ require.NoError(t, err)
+
+ t.Run("CreateSnapshot", func(t *testing.T) {
+ snapshot, err := client.CreateSnapshot(ctx, collectionName)
+ require.NoError(t, err)
+ require.NotNil(t, snapshot)
+ })
+
+ t.Run("ListSnapshots", func(t *testing.T) { // depends on the snapshot created by the subtest above; subtests run in order
+ snapshots, err := client.ListSnapshots(ctx, collectionName)
+ require.NoError(t, err)
+ require.NotEmpty(t, snapshots)
+ })
+
+ t.Run("DeleteSnapshot", func(t *testing.T) { // re-lists rather than caching, so it stays valid if earlier subtests change
+ snapshots, err := client.ListSnapshots(ctx, collectionName)
+ require.NoError(t, err)
+ require.NotEmpty(t, snapshots)
+
+ err = client.DeleteSnapshot(ctx, collectionName, snapshots[0].GetName())
+ require.NoError(t, err)
+ })
+
+ t.Run("CreateFullSnapshot", func(t *testing.T) { // full snapshots cover the whole instance, not one collection
+ snapshot, err := client.CreateFullSnapshot(ctx)
+ require.NoError(t, err)
+ require.NotNil(t, snapshot)
+ })
+
+ t.Run("ListFullSnapshots", func(t *testing.T) { // depends on the full snapshot created by the subtest above
+ snapshots, err := client.ListFullSnapshots(ctx)
+ require.NoError(t, err)
+ require.NotEmpty(t, snapshots)
+ })
+
+ t.Run("DeleteFullSnapshot", func(t *testing.T) {
+ snapshots, err := client.ListFullSnapshots(ctx)
+ require.NoError(t, err)
+ require.NotEmpty(t, snapshots)
+
+ err = client.DeleteFullSnapshot(ctx, snapshots[0].GetName())
+ require.NoError(t, err)
+ })
+
+ err = client.DeleteCollection(ctx, collectionName)
+ require.NoError(t, err)
+}
diff --git a/tools/generate_proto_go.sh b/tools/generate_proto_go.sh
index a62615b..0d6ae89 100755
--- a/tools/generate_proto_go.sh
+++ b/tools/generate_proto_go.sh
@@ -4,23 +4,14 @@ PROJECT_ROOT="$(pwd)/$(dirname "$0")/../"
QDRANT_PROTO_DIR='proto'
-# go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28
-# go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.2
+go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.34.2 # version queries need the "v" prefix; "@1.34.2" fails to resolve
+go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.5.1
-# go install github.com/golang/protobuf/protoc-gen-go@v1.5.2
+export PATH="$PATH:$(go env GOPATH)/bin"
-
-GOPATH=$(go env GOPATH)
-
-case ":$PATH:" in
- *":$GOPATH/bin:"*) ;;
- *) export PATH="$GOPATH/bin:$PATH";;
-esac
-
-# SCRIPT_DIR=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
PROTO_DIR=./proto
OUT_DIR=./qdrant
-PACKAGE_NAME=github.com/qdrant/go-client
+PACKAGE_NAME="github.com/qdrant/go-client;qdrant"
protoc \
--experimental_allow_proto3_optional \
diff --git a/tools/sync_proto.sh b/tools/sync_proto.sh
index a436ade..18a545f 100755
--- a/tools/sync_proto.sh
+++ b/tools/sync_proto.sh
@@ -2,11 +2,12 @@
set -e
+BRANCH=${BRANCH:-"master"}
PROJECT_ROOT="$(pwd)/$(dirname "$0")/../"
cd $(mktemp -d)
-git clone --sparse --filter=blob:none --depth=1 git@github.com:qdrant/qdrant.git
+git clone --sparse --branch $BRANCH --filter=blob:none --depth=1 https://github.com/qdrant/qdrant
cd qdrant
git sparse-checkout add lib/api/src/grpc/proto
@@ -37,3 +38,5 @@ sed -i '
# Remove csharp option from proto files
sed -i '/option csharp_namespace = .*/d' $CLIENT_DIR/*.proto
+
+sh tools/generate_proto_go.sh