diff --git a/Cargo.toml b/Cargo.toml
index 8fe85a2..607087b 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -19,11 +19,11 @@ anyhow = "1"
 serde = { version = "1", features = ["derive"], optional = true }
 serde_json = { version = "1", optional = true }
 reqwest = { version = "0.11.22", optional = true, default-features = false, features = ["stream", "rustls-tls"] }
-futures-util = { version = "0.3.28", optional = true }
+futures-util = { version = "0.3.29", optional = true }

 [dev-dependencies]
 tonic-build = { version = "0.9.2", features = ["prost"] }
-tokio = { version = "1.32.0", features = ["rt-multi-thread"] }
+tokio = { version = "1.34.0", features = ["rt-multi-thread"] }

 [features]
 default = ["download_snapshots", "serde"]
diff --git a/proto/collections.proto b/proto/collections.proto
index 093a5a7..92ccb0e 100644
--- a/proto/collections.proto
+++ b/proto/collections.proto
@@ -1,5 +1,6 @@
 syntax = "proto3";
 package qdrant;
+option csharp_namespace = "Qdrant.Client.Grpc";

 message VectorParams {
   uint64 size = 1; // Size of the vectors
@@ -37,6 +38,14 @@ message VectorsConfigDiff {
   }
 }

+message SparseVectorParams {
+  optional SparseIndexConfig index = 1; // Configuration of sparse index
+}
+
+message SparseVectorConfig {
+  map<string, SparseVectorParams> map = 1;
+}
+
 message GetCollectionInfoRequest {
   string collection_name = 1; // Name of the collection
 }

@@ -63,6 +72,7 @@ enum Distance {
   Cosine = 1;
   Euclid = 2;
   Dot = 3;
+  Manhattan = 4;
 }

 enum CollectionStatus {
@@ -130,6 +140,18 @@ message HnswConfigDiff {
   optional uint64 payload_m = 6;
 }

+message SparseIndexConfig {
+  /*
+  Prefer a full scan search up to (excluding) this number of vectors.
+  Note: this is the number of vectors, not kilobytes.
+  */
+  optional uint64 full_scan_threshold = 1;
+  /*
+  Store inverted index on disk. If set to false, the index will be stored in RAM.
+ */ + optional bool on_disk = 2; +} + message WalConfigDiff { optional uint64 wal_capacity_mb = 1; // Size of a single WAL block file optional uint64 wal_segments_ahead = 2; // Number of segments to create in advance @@ -233,6 +255,11 @@ message QuantizationConfigDiff { } } +enum ShardingMethod { + Auto = 0; // Auto-sharding based on record ids + Custom = 1; // Shard by user-defined key +} + message CreateCollection { string collection_name = 1; // Name of the collection reserved 2; // Deprecated @@ -248,6 +275,8 @@ message CreateCollection { optional uint32 write_consistency_factor = 12; // How many replicas should apply the operation for us to consider it successful, default = 1 optional string init_from_collection = 13; // Specify name of the other collection to copy data from optional QuantizationConfig quantization_config = 14; // Quantization configuration of vector + optional ShardingMethod sharding_method = 15; // Sharding method + optional SparseVectorConfig sparse_vectors_config = 16; // Configuration for sparse vectors } message UpdateCollection { @@ -258,6 +287,7 @@ message UpdateCollection { optional HnswConfigDiff hnsw_config = 5; // New HNSW parameters for the collection index optional VectorsConfigDiff vectors_config = 6; // New vector parameters optional QuantizationConfigDiff quantization_config = 7; // Quantization configuration of vector + optional SparseVectorConfig sparse_vectors_config = 8; // New sparse vector parameters } message DeleteCollection { @@ -279,6 +309,8 @@ message CollectionParams { optional uint32 replication_factor = 6; // Number of replicas of each shard that network tries to maintain optional uint32 write_consistency_factor = 7; // How many replicas should apply the operation for us to consider it successful optional uint32 read_fan_out_factor = 8; // Fan-out every read request to these many additional remote nodes (and return first available response) + optional ShardingMethod sharding_method = 9; // Sharding method + optional SparseVectorConfig sparse_vectors_config = 10; // Configuration for sparse vectors } message CollectionParamsDiff { @@ -390,18 +422,28 @@ enum ReplicaState { Partial = 2; // The shard is partially loaded and is currently receiving data from other shards Initializing = 3; // Collection is being created Listener = 4; // A shard which receives data, but is not used for search; Useful for backup shards + PartialSnapshot = 5; // Snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard +} + +message ShardKey { + oneof key { + string keyword = 1; // String key + uint64 number = 2; // Number key + } } message LocalShardInfo { uint32 shard_id = 1; // Local shard id uint64 points_count = 2; // Number of points in the shard ReplicaState state = 3; // Is replica active + optional ShardKey shard_key = 4; // User-defined shard key } message RemoteShardInfo { uint32 shard_id = 1; // Local shard id uint64 peer_id = 2; // Remote peer id ReplicaState state = 3; // Is replica active + optional ShardKey shard_key = 4; // User-defined shard key } message ShardTransferInfo { @@ -423,6 +465,12 @@ message MoveShard { uint32 shard_id = 1; // Local shard id uint64 from_peer_id = 2; uint64 to_peer_id = 3; + optional ShardTransferMethod method = 4; +} + +enum ShardTransferMethod { + StreamRecords = 0; + Snapshot = 1; } message Replica { @@ -430,6 +478,17 @@ message Replica { uint64 peer_id = 2; } +message CreateShardKey { + ShardKey shard_key = 1; // User-defined shard key + optional uint32 shards_number = 2; 
// Number of shards to create per shard key + optional uint32 replication_factor = 3; // Number of replicas of each shard to create + repeated uint64 placement = 4; // List of peer ids, allowed to create shards. If empty - all peers are allowed +} + +message DeleteShardKey { + ShardKey shard_key = 1; // Shard key to delete +} + message UpdateCollectionClusterSetupRequest { string collection_name = 1; // Name of the collection oneof operation { @@ -437,6 +496,8 @@ message UpdateCollectionClusterSetupRequest { MoveShard replicate_shard = 3; MoveShard abort_transfer = 4; Replica drop_replica = 5; + CreateShardKey create_shard_key = 7; + DeleteShardKey delete_shard_key = 8; } optional uint64 timeout = 6; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied } @@ -444,3 +505,23 @@ message UpdateCollectionClusterSetupRequest { message UpdateCollectionClusterSetupResponse { bool result = 1; } + +message CreateShardKeyRequest { + string collection_name = 1; // Name of the collection + CreateShardKey request = 2; // Request to create shard key + optional uint64 timeout = 3; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied +} + +message DeleteShardKeyRequest { + string collection_name = 1; // Name of the collection + DeleteShardKey request = 2; // Request to delete shard key + optional uint64 timeout = 3; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied +} + +message CreateShardKeyResponse { + bool result = 1; +} + +message DeleteShardKeyResponse { + bool result = 1; +} diff --git a/proto/collections_service.proto b/proto/collections_service.proto index 2297027..723124e 100644 --- a/proto/collections_service.proto +++ b/proto/collections_service.proto @@ -3,6 +3,7 @@ syntax = "proto3"; import "collections.proto"; package qdrant; +option csharp_namespace = "Qdrant.Client.Grpc"; service Collections { /* @@ -45,4 +46,12 @@ service Collections { Update cluster setup for a collection */ rpc UpdateCollectionClusterSetup (UpdateCollectionClusterSetupRequest) returns (UpdateCollectionClusterSetupResponse) {} + /* + Create shard key + */ + rpc CreateShardKey (CreateShardKeyRequest) returns (CreateShardKeyResponse) {} + /* + Delete shard key + */ + rpc DeleteShardKey (DeleteShardKeyRequest) returns (DeleteShardKeyResponse) {} } diff --git a/proto/json_with_int.proto b/proto/json_with_int.proto index 3fc496e..283fdc3 100644 --- a/proto/json_with_int.proto +++ b/proto/json_with_int.proto @@ -3,6 +3,7 @@ syntax = "proto3"; package qdrant; +option csharp_namespace = "Qdrant.Client.Grpc"; // `Struct` represents a structured data value, consisting of fields // which map to dynamically typed values. 
In some languages, `Struct` diff --git a/proto/points.proto b/proto/points.proto index 43a7e32..f95a4d3 100644 --- a/proto/points.proto +++ b/proto/points.proto @@ -1,6 +1,7 @@ syntax = "proto3"; package qdrant; +option csharp_namespace = "Qdrant.Client.Grpc"; import "json_with_int.proto"; import "collections.proto"; @@ -40,10 +41,23 @@ message PointId { } } +message SparseIndices { + repeated uint32 data = 1; +} + message Vector { repeated float data = 1; + optional SparseIndices indices = 2; +} + +// --------------------------------------------- +// ----------------- ShardKeySelector ---------- +// --------------------------------------------- +message ShardKeySelector { + repeated ShardKey shard_keys = 1; // List of shard keys which should be used in the request } + // --------------------------------------------- // ---------------- RPC Requests --------------- // --------------------------------------------- @@ -53,6 +67,7 @@ message UpsertPoints { optional bool wait = 2; // Wait until the changes have been applied? repeated PointStruct points = 3; optional WriteOrdering ordering = 4; // Write ordering guarantees + optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys } message DeletePoints { @@ -60,6 +75,7 @@ message DeletePoints { optional bool wait = 2; // Wait until the changes have been applied? PointsSelector points = 3; // Affected points optional WriteOrdering ordering = 4; // Write ordering guarantees + optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys } message GetPoints { @@ -69,6 +85,7 @@ message GetPoints { WithPayloadSelector with_payload = 4; // Options for specifying which payload to include or not optional WithVectorsSelector with_vectors = 5; // Options for specifying which vectors to include into response optional ReadConsistency read_consistency = 6; // Options for specifying read consistency guarantees + optional ShardKeySelector shard_key_selector = 7; // Specify in which shards to look for the points, if not specified - look in all shards } message UpdatePointVectors { @@ -76,6 +93,7 @@ message UpdatePointVectors { optional bool wait = 2; // Wait until the changes have been applied? 
repeated PointVectors points = 3; // List of points and vectors to update optional WriteOrdering ordering = 4; // Write ordering guarantees + optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys } message PointVectors { @@ -89,6 +107,7 @@ message DeletePointVectors { PointsSelector points_selector = 3; // Affected points VectorsSelector vectors = 4; // List of vector names to delete optional WriteOrdering ordering = 5; // Write ordering guarantees + optional ShardKeySelector shard_key_selector = 6; // Option for custom sharding to specify used shard keys } message SetPayloadPoints { @@ -98,6 +117,7 @@ message SetPayloadPoints { reserved 4; // List of point to modify, deprecated optional PointsSelector points_selector = 5; // Affected points optional WriteOrdering ordering = 6; // Write ordering guarantees + optional ShardKeySelector shard_key_selector = 7; // Option for custom sharding to specify used shard keys } message DeletePayloadPoints { @@ -107,6 +127,7 @@ message DeletePayloadPoints { reserved 4; // Affected points, deprecated optional PointsSelector points_selector = 5; // Affected points optional WriteOrdering ordering = 6; // Write ordering guarantees + optional ShardKeySelector shard_key_selector = 7; // Option for custom sharding to specify used shard keys } message ClearPayloadPoints { @@ -114,6 +135,7 @@ message ClearPayloadPoints { optional bool wait = 2; // Wait until the changes have been applied? PointsSelector points = 3; // Affected points optional WriteOrdering ordering = 4; // Write ordering guarantees + optional ShardKeySelector shard_key_selector = 5; // Option for custom sharding to specify used shard keys } enum FieldType { @@ -239,12 +261,16 @@ message SearchPoints { optional string vector_name = 10; // Which vector to use for search, if not specified - use default vector optional WithVectorsSelector with_vectors = 11; // Options for specifying which vectors to include into response optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees + optional uint64 timeout = 13; // If set, overrides global timeout setting for this request. Unit is seconds. + optional ShardKeySelector shard_key_selector = 14; // Specify in which shards to look for the points, if not specified - look in all shards + optional SparseIndices sparse_indices = 15; } message SearchBatchPoints { string collection_name = 1; // Name of the collection repeated SearchPoints search_points = 2; optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees + optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds. } message WithLookup { @@ -268,6 +294,9 @@ message SearchPointGroups { uint32 group_size = 11; // Maximum amount of points to return per group optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees optional WithLookup with_lookup = 13; // Options for specifying how to use the group id to lookup points in another collection + optional uint64 timeout = 14; // If set, overrides global timeout setting for this request. Unit is seconds. 
+ optional ShardKeySelector shard_key_selector = 15; // Specify in which shards to look for the points, if not specified - look in all shards + optional SparseIndices sparse_indices = 16; } message ScrollPoints { @@ -279,6 +308,7 @@ message ScrollPoints { WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not optional WithVectorsSelector with_vectors = 7; // Options for specifying which vectors to include into response optional ReadConsistency read_consistency = 8; // Options for specifying read consistency guarantees + optional ShardKeySelector shard_key_selector = 9; // Specify in which shards to look for the points, if not specified - look in all shards } // How to use positive and negative vectors to find the results, default is `AverageVector`: @@ -296,6 +326,7 @@ enum RecommendStrategy { message LookupLocation { string collection_name = 1; optional string vector_name = 2; // Which vector to use for search, if not specified - use default vector + optional ShardKeySelector shard_key_selector = 3; // Specify in which shards to look for the points, if not specified - look in all shards } message RecommendPoints { @@ -316,12 +347,15 @@ message RecommendPoints { optional RecommendStrategy strategy = 16; // How to use the example vectors to find the results repeated Vector positive_vectors = 17; // Look for vectors closest to those repeated Vector negative_vectors = 18; // Try to avoid vectors like this + optional uint64 timeout = 19; // If set, overrides global timeout setting for this request. Unit is seconds. + optional ShardKeySelector shard_key_selector = 20; // Specify in which shards to look for the points, if not specified - look in all shards } message RecommendBatchPoints { string collection_name = 1; // Name of the collection repeated RecommendPoints recommend_points = 2; optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees + optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds. } message RecommendPointGroups { @@ -343,43 +377,106 @@ message RecommendPointGroups { optional RecommendStrategy strategy = 17; // How to use the example vectors to find the results repeated Vector positive_vectors = 18; // Look for vectors closest to those repeated Vector negative_vectors = 19; // Try to avoid vectors like this + optional uint64 timeout = 20; // If set, overrides global timeout setting for this request. Unit is seconds. 
+ optional ShardKeySelector shard_key_selector = 21; // Specify in which shards to look for the points, if not specified - look in all shards +} + +message TargetVector { + oneof target { + VectorExample single = 1; + + // leaving extensibility for possibly adding multi-target + } +} + +message VectorExample { + oneof example { + PointId id = 1; + Vector vector = 2; + } +} + +message ContextExamplePair { + VectorExample positive = 1; + VectorExample negative = 2; +} + +message DiscoverPoints { + string collection_name = 1; // name of the collection + TargetVector target = 2; // Use this as the primary search objective + repeated ContextExamplePair context = 3; // Search will be constrained by these pairs of examples + Filter filter = 4; // Filter conditions - return only those points that satisfy the specified conditions + uint64 limit = 5; // Max number of result + WithPayloadSelector with_payload = 6; // Options for specifying which payload to include or not + SearchParams params = 7; // Search config + optional uint64 offset = 8; // Offset of the result + optional string using = 9; // Define which vector to use for recommendation, if not specified - default vector + optional WithVectorsSelector with_vectors = 10; // Options for specifying which vectors to include into response + optional LookupLocation lookup_from = 11; // Name of the collection to use for points lookup, if not specified - use current collection + optional ReadConsistency read_consistency = 12; // Options for specifying read consistency guarantees + optional uint64 timeout = 13; // If set, overrides global timeout setting for this request. Unit is seconds. + optional ShardKeySelector shard_key_selector = 14; // Specify in which shards to look for the points, if not specified - look in all shards +} + +message DiscoverBatchPoints { + string collection_name = 1; // Name of the collection + repeated DiscoverPoints discover_points = 2; + optional ReadConsistency read_consistency = 3; // Options for specifying read consistency guarantees + optional uint64 timeout = 4; // If set, overrides global timeout setting for this request. Unit is seconds. 
+}

 message CountPoints {
   string collection_name = 1; // name of the collection
   Filter filter = 2; // Filter conditions - return only those points that satisfy the specified conditions
   optional bool exact = 3; // If `true` - return exact count, if `false` - return approximate count
+  optional ReadConsistency read_consistency = 4; // Options for specifying read consistency guarantees
+  optional ShardKeySelector shard_key_selector = 5; // Specify in which shards to look for the points, if not specified - look in all shards
 }

 message PointsUpdateOperation {
   message PointStructList {
     repeated PointStruct points = 1;
+    optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
   }
   message SetPayload {
     map<string, Value> payload = 1;
     optional PointsSelector points_selector = 2; // Affected points
+    optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
   }
   message DeletePayload {
     repeated string keys = 1;
     optional PointsSelector points_selector = 2; // Affected points
+    optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
   }
   message UpdateVectors {
     repeated PointVectors points = 1; // List of points and vectors to update
+    optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
   }
   message DeleteVectors {
     PointsSelector points_selector = 1; // Affected points
     VectorsSelector vectors = 2; // List of vector names to delete
+    optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys
+  }
+  message DeletePoints {
+    PointsSelector points = 1; // Affected points
+    optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
+  }
+  message ClearPayload {
+    PointsSelector points = 1; // Affected points
+    optional ShardKeySelector shard_key_selector = 2; // Option for custom sharding to specify used shard keys
   }

   oneof operation {
     PointStructList upsert = 1;
-    PointsSelector delete = 2;
+    PointsSelector delete_deprecated = 2 [deprecated=true];
     SetPayload set_payload = 3;
     SetPayload overwrite_payload = 4;
     DeletePayload delete_payload = 5;
-    PointsSelector clear_payload = 6;
+    PointsSelector clear_payload_deprecated = 6 [deprecated=true];
     UpdateVectors update_vectors = 7;
     DeleteVectors delete_vectors = 8;
+    DeletePoints delete_points = 9;
+    ClearPayload clear_payload = 10;
   }
 }

@@ -400,7 +497,7 @@ message PointsOperationResponse {
 }

 message UpdateResult {
-  uint64 operation_id = 1; // Number of operation
+  optional uint64 operation_id = 1; // Number of operation
   UpdateStatus status = 2; // Operation status
 }

@@ -417,6 +514,7 @@ message ScoredPoint {
   reserved 4; // deprecated "vector" field
   uint64 version = 5; // Last update operation applied to this point
   optional Vectors vectors = 6; // Vectors to search
+  optional ShardKey shard_key = 7; // Shard key
 }

 message GroupId {
@@ -479,6 +577,7 @@ message RetrievedPoint {
   map<string, Value> payload = 2;
   reserved 3; // deprecated "vector" field
   optional Vectors vectors = 4;
+  optional ShardKey shard_key = 5; // Shard key
 }

 message GetResponse {
@@ -496,6 +595,16 @@ message RecommendBatchResponse {
   double time = 2; // Time spent to process
 }

+message DiscoverResponse {
+  repeated ScoredPoint result = 1;
+  double time = 2; // Time spent to process
+}
+
+message DiscoverBatchResponse {
+  repeated BatchResult result = 1;
+  double time = 2; // Time spent to process
+}
+
 message RecommendGroupsResponse {
   GroupsResult result = 1;
   double time = 2; // Time spent to process
diff --git a/proto/points_service.proto b/proto/points_service.proto
index 5921777..ddfdc62 100644
--- a/proto/points_service.proto
+++ b/proto/points_service.proto
@@ -3,8 +3,7 @@ syntax = "proto3";
 import "points.proto";

 package qdrant;
-
-import "google/protobuf/struct.proto";
+option csharp_namespace = "Qdrant.Client.Grpc";

 service Points {
   /*
@@ -75,10 +74,32 @@ service Points {
   Look for the points which are closer to stored positive examples and at the same time further to negative examples.
   */
   rpc RecommendBatch (RecommendBatchPoints) returns (RecommendBatchResponse) {}
-  /*
+  /*
   Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given field
   */
   rpc RecommendGroups (RecommendPointGroups) returns (RecommendGroupsResponse) {}
+  /*
+  Use context and a target to find the most similar points to the target, constrained by the context.
+
+  When using only the context (without a target), a special search - called context search - is performed where
+  pairs of points are used to generate a loss that guides the search towards the zone where
+  most positive examples overlap. This means that the score minimizes the scenario of
+  finding a point closer to a negative than to a positive part of a pair.
+
+  Since the score of a context relates to loss, the maximum score a point can get is 0.0,
+  and it is normal for many points to have a score of 0.0.
+
+  When using a target (with or without context), the score behaves a little differently: the
+  integer part of the score represents the rank with respect to the context, while the
+  decimal part of the score relates to the distance to the target. The context part of the score for
+  each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair,
+  and -1 otherwise.
+  */
+  rpc Discover (DiscoverPoints) returns (DiscoverResponse) {}
+  /*
+  Batch request points based on { positive, negative } pairs of examples, and/or a target
+  */
+  rpc DiscoverBatch (DiscoverBatchPoints) returns (DiscoverBatchResponse) {}
   /*
   Count points in collection with given filtering conditions
   */
diff --git a/proto/qdrant.proto b/proto/qdrant.proto
index ccbfa60..4f58399 100644
--- a/proto/qdrant.proto
+++ b/proto/qdrant.proto
@@ -5,6 +5,7 @@ import "points_service.proto";
 import "snapshots_service.proto";

 package qdrant;
+option csharp_namespace = "Qdrant.Client.Grpc";

 service Qdrant {
   rpc HealthCheck (HealthCheckRequest) returns (HealthCheckReply) {}
diff --git a/proto/snapshots_service.proto b/proto/snapshots_service.proto
index 5770268..5127b28 100644
--- a/proto/snapshots_service.proto
+++ b/proto/snapshots_service.proto
@@ -1,8 +1,8 @@
 syntax = "proto3";

 package qdrant;
+option csharp_namespace = "Qdrant.Client.Grpc";

-import "google/protobuf/struct.proto";
 import "google/protobuf/timestamp.proto";

 service Snapshots {
@@ -15,7 +15,7 @@ service Snapshots {
   */
   rpc List (ListSnapshotsRequest) returns (ListSnapshotsResponse) {}
   /*
-  Delete collection snapshots
+  Delete collection snapshot
   */
   rpc Delete (DeleteSnapshotRequest) returns (DeleteSnapshotResponse) {}
   /*
@@ -27,10 +27,9 @@ service Snapshots {
   */
   rpc ListFull (ListFullSnapshotsRequest) returns (ListSnapshotsResponse) {}
   /*
-  List full storage snapshots
+  Delete full storage snapshot
   */
   rpc DeleteFull (DeleteFullSnapshotRequest) returns (DeleteSnapshotResponse) {}
-
 }

 message CreateFullSnapshotRequest {}
diff --git a/src/client.rs b/src/client.rs
index 8a47df2..3ea809a 100644
--- a/src/client.rs
+++ b/src/client.rs
@@ -25,10 +25,10 @@ use crate::qdrant::{
     RecommendBatchPoints, RecommendBatchResponse, RecommendGroupsResponse, RecommendPointGroups,
     RecommendPoints, RecommendResponse, RenameAlias, ScrollPoints, ScrollResponse,
     SearchBatchPoints, SearchBatchResponse, SearchGroupsResponse, SearchPointGroups, SearchPoints,
-    SearchResponse, SetPayloadPoints, Struct, UpdateBatchPoints, UpdateBatchResponse,
-    UpdateCollection, UpdateCollectionClusterSetupRequest, UpdateCollectionClusterSetupResponse,
-    UpdatePointVectors, UpsertPoints, Value, Vector, Vectors, VectorsSelector, WithPayloadSelector,
-    WithVectorsSelector, WriteOrdering,
+    SearchResponse, SetPayloadPoints, SparseIndices, Struct, UpdateBatchPoints,
+    UpdateBatchResponse, UpdateCollection, UpdateCollectionClusterSetupRequest,
+    UpdateCollectionClusterSetupResponse, UpdatePointVectors, UpsertPoints, Value, Vector, Vectors,
+    VectorsSelector, WithPayloadSelector, WithVectorsSelector, WriteOrdering,
 };
 use anyhow::Result;
 #[cfg(feature = "serde")]
@@ -216,7 +216,25 @@ impl From<Vec<PointId>> for PointsSelector {

 impl From<Vec<f32>> for Vector {
     fn from(vector: Vec<f32>) -> Self {
-        Vector { data: vector }
+        Vector {
+            data: vector,
+            indices: None,
+        }
+    }
+}
+
+impl From<Vec<(u32, f32)>> for Vector {
+    fn from(tuples: Vec<(u32, f32)>) -> Self {
+        let mut indices = Vec::with_capacity(tuples.len());
+        let mut values = Vec::with_capacity(tuples.len());
+        for (i, w) in tuples {
+            indices.push(i);
+            values.push(w);
+        }
+        Vector {
+            data: values,
+            indices: Some(SparseIndices { data: indices }),
+        }
     }
 }

@@ -233,6 +251,29 @@ impl From<HashMap<String, Vec<f32>>> for Vectors {
     }
 }

+impl From<HashMap<String, Vector>> for Vectors {
+    fn from(named_vectors: HashMap<String, Vector>) -> Self {
+        Vectors {
+            vectors_options: Some(VectorsOptions::Vectors(NamedVectors {
+                vectors: named_vectors.into_iter().map(|(k, v)| (k, v)).collect(),
+            })),
+        }
+    }
+}
+
+impl From<HashMap<String, Vec<(u32, f32)>>> for Vectors {
+    fn from(named_vectors: HashMap<String, Vec<(u32, f32)>>) -> Self {
+        Vectors {
+            vectors_options: Some(VectorsOptions::Vectors(NamedVectors {
+                vectors: named_vectors
+                    .into_iter()
+                    .map(|(k, v)| (k, v.into()))
+                    .collect(),
+            })),
+        }
+    }
+}
+
 impl From<Vec<f32>> for Vectors {
     fn from(vector: Vec<f32>) -> Self {
         Vectors {
@@ -460,6 +501,7 @@ impl QdrantClient {
                 hnsw_config: None,
                 vectors_config: None,
                 quantization_config: None,
+                sparse_vectors_config: None,
             })
             .await?;
@@ -738,6 +780,7 @@ impl QdrantClient {
                 wait: Some(block),
                 points: points.to_vec(),
                 ordering: ordering_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?
             .into_inner())
@@ -804,6 +847,7 @@ impl QdrantClient {
                 wait: Some(block),
                 points: chunk.to_vec(),
                 ordering: ordering_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?
             .into_inner();
@@ -859,6 +903,7 @@ impl QdrantClient {
                 payload: payload.0.clone(),
                 points_selector: Some(points.clone()),
                 ordering: ordering_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?;
         Ok(result.into_inner())
@@ -910,6 +955,7 @@ impl QdrantClient {
                 payload: payload.0.clone(),
                 points_selector: Some(points.clone()),
                 ordering: ordering_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?;
         Ok(result.into_inner())
@@ -961,6 +1007,7 @@ impl QdrantClient {
                 keys: keys.to_owned(),
                 points_selector: Some(points.clone()),
                 ordering: ordering_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?;
         Ok(result.into_inner())
@@ -1008,6 +1055,7 @@ impl QdrantClient {
                 wait: Some(block),
                 points: points_selector.cloned(),
                 ordering: ordering_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?;
         Ok(result.into_inner())
@@ -1042,6 +1090,7 @@ impl QdrantClient {
                 with_payload: with_payload_ref.cloned(),
                 with_vectors: with_vectors_ref.cloned(),
                 read_consistency: read_consistency_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?;
@@ -1119,6 +1168,7 @@ impl QdrantClient {
                 wait: Some(blocking),
                 points: Some(points.clone()),
                 ordering: ordering_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?;
         Ok(result.into_inner())
@@ -1181,6 +1231,7 @@ impl QdrantClient {
                 points_selector: Some(points_selector.clone()),
                 vectors: Some(vector_selector.clone()),
                 ordering: ordering_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?;
         Ok(result.into_inner())
@@ -1227,6 +1278,7 @@ impl QdrantClient {
                 wait: Some(blocking),
                 points: points.to_owned(),
                 ordering: ordering_ref.cloned(),
+                shard_key_selector: None,
             })
             .await?;
         Ok(result.into_inner())
diff --git a/src/lib.rs b/src/lib.rs
index 362ca88..700eb56 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -520,6 +520,9 @@ mod tests {
                 vector_name: None,
                 with_vectors: None,
                 read_consistency: None,
+                timeout: None,
+                shard_key_selector: None,
+                sparse_indices: None,
             })
             .await?;
diff --git a/src/qdrant.rs b/src/qdrant.rs
index 496ee31..077121d 100644
--- a/src/qdrant.rs
+++ b/src/qdrant.rs
@@ -81,6 +81,22 @@ pub mod vectors_config_diff {
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SparseVectorParams {
+    /// Configuration of sparse index
+    #[prost(message, optional, tag = "1")]
+    pub index: ::core::option::Option<SparseIndexConfig>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SparseVectorConfig {
+    #[prost(map = "string, message", tag = "1")]
+    pub map: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        SparseVectorParams,
+    >,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
 pub struct GetCollectionInfoRequest {
     /// Name of the collection
     #[prost(string, tag = "1")]
@@ -155,6 +171,19 @@ pub struct HnswConfigDiff {
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SparseIndexConfig {
+    ///
+    /// Prefer a full scan search up to (excluding) this number of vectors.
+    /// Note: this is the number of vectors, not kilobytes.
+    #[prost(uint64, optional, tag = "1")]
+    pub full_scan_threshold: ::core::option::Option<u64>,
+    ///
+    /// Store inverted index on disk. If set to false, the index will be stored in RAM.
+    #[prost(bool, optional, tag = "2")]
+    pub on_disk: ::core::option::Option<bool>,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
 pub struct WalConfigDiff {
     /// Size of a single WAL block file
     #[prost(uint64, optional, tag = "1")]
@@ -338,6 +367,12 @@ pub struct CreateCollection {
     /// Quantization configuration of vector
     #[prost(message, optional, tag = "14")]
     pub quantization_config: ::core::option::Option<QuantizationConfig>,
+    /// Sharding method
+    #[prost(enumeration = "ShardingMethod", optional, tag = "15")]
+    pub sharding_method: ::core::option::Option<i32>,
+    /// Configuration for sparse vectors
+    #[prost(message, optional, tag = "16")]
+    pub sparse_vectors_config: ::core::option::Option<SparseVectorConfig>,
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
@@ -363,6 +398,9 @@ pub struct UpdateCollection {
     /// Quantization configuration of vector
     #[prost(message, optional, tag = "7")]
     pub quantization_config: ::core::option::Option<QuantizationConfigDiff>,
+    /// New sparse vector parameters
+    #[prost(message, optional, tag = "8")]
+    pub sparse_vectors_config: ::core::option::Option<SparseVectorConfig>,
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
@@ -405,6 +443,12 @@ pub struct CollectionParams {
     /// Fan-out every read request to these many additional remote nodes (and return first available response)
     #[prost(uint32, optional, tag = "8")]
     pub read_fan_out_factor: ::core::option::Option<u32>,
+    /// Sharding method
+    #[prost(enumeration = "ShardingMethod", optional, tag = "9")]
+    pub sharding_method: ::core::option::Option<i32>,
+    /// Configuration for sparse vectors
+    #[prost(message, optional, tag = "10")]
+    pub sparse_vectors_config: ::core::option::Option<SparseVectorConfig>,
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
@@ -611,6 +655,25 @@ pub struct CollectionClusterInfoRequest {
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
+pub struct ShardKey {
+    #[prost(oneof = "shard_key::Key", tags = "1, 2")]
+    pub key: ::core::option::Option<shard_key::Key>,
+}
+/// Nested message and enum types in `ShardKey`.
+pub mod shard_key { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Key { + /// String key + #[prost(string, tag = "1")] + Keyword(::prost::alloc::string::String), + /// Number key + #[prost(uint64, tag = "2")] + Number(u64), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct LocalShardInfo { /// Local shard id #[prost(uint32, tag = "1")] @@ -621,6 +684,9 @@ pub struct LocalShardInfo { /// Is replica active #[prost(enumeration = "ReplicaState", tag = "3")] pub state: i32, + /// User-defined shard key + #[prost(message, optional, tag = "4")] + pub shard_key: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -634,6 +700,9 @@ pub struct RemoteShardInfo { /// Is replica active #[prost(enumeration = "ReplicaState", tag = "3")] pub state: i32, + /// User-defined shard key + #[prost(message, optional, tag = "4")] + pub shard_key: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -678,6 +747,8 @@ pub struct MoveShard { pub from_peer_id: u64, #[prost(uint64, tag = "3")] pub to_peer_id: u64, + #[prost(enumeration = "ShardTransferMethod", optional, tag = "4")] + pub method: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -689,6 +760,29 @@ pub struct Replica { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateShardKey { + /// User-defined shard key + #[prost(message, optional, tag = "1")] + pub shard_key: ::core::option::Option, + /// Number of shards to create per shard key + #[prost(uint32, optional, tag = "2")] + pub shards_number: ::core::option::Option, + /// Number of replicas of each shard to create + #[prost(uint32, optional, tag = "3")] + pub replication_factor: ::core::option::Option, + /// List of peer ids, allowed to create shards. 
If empty - all peers are allowed + #[prost(uint64, repeated, tag = "4")] + pub placement: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteShardKey { + /// Shard key to delete + #[prost(message, optional, tag = "1")] + pub shard_key: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct UpdateCollectionClusterSetupRequest { /// Name of the collection #[prost(string, tag = "1")] @@ -698,7 +792,7 @@ pub struct UpdateCollectionClusterSetupRequest { pub timeout: ::core::option::Option, #[prost( oneof = "update_collection_cluster_setup_request::Operation", - tags = "2, 3, 4, 5" + tags = "2, 3, 4, 5, 7, 8" )] pub operation: ::core::option::Option< update_collection_cluster_setup_request::Operation, @@ -717,6 +811,10 @@ pub mod update_collection_cluster_setup_request { AbortTransfer(super::MoveShard), #[prost(message, tag = "5")] DropReplica(super::Replica), + #[prost(message, tag = "7")] + CreateShardKey(super::CreateShardKey), + #[prost(message, tag = "8")] + DeleteShardKey(super::DeleteShardKey), } } #[allow(clippy::derive_partial_eq_without_eq)] @@ -725,6 +823,44 @@ pub struct UpdateCollectionClusterSetupResponse { #[prost(bool, tag = "1")] pub result: bool, } +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateShardKeyRequest { + /// Name of the collection + #[prost(string, tag = "1")] + pub collection_name: ::prost::alloc::string::String, + /// Request to create shard key + #[prost(message, optional, tag = "2")] + pub request: ::core::option::Option, + /// Wait timeout for operation commit in seconds, if not specified - default value will be supplied + #[prost(uint64, optional, tag = "3")] + pub timeout: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteShardKeyRequest { + /// Name of the collection + #[prost(string, tag = "1")] + pub collection_name: ::prost::alloc::string::String, + /// Request to delete shard key + #[prost(message, optional, tag = "2")] + pub request: ::core::option::Option, + /// Wait timeout for operation commit in seconds, if not specified - default value will be supplied + #[prost(uint64, optional, tag = "3")] + pub timeout: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CreateShardKeyResponse { + #[prost(bool, tag = "1")] + pub result: bool, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DeleteShardKeyResponse { + #[prost(bool, tag = "1")] + pub result: bool, +} #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum Distance { @@ -732,6 +868,7 @@ pub enum Distance { Cosine = 1, Euclid = 2, Dot = 3, + Manhattan = 4, } impl Distance { /// String value of the enum field names used in the ProtoBuf definition. @@ -744,6 +881,7 @@ impl Distance { Distance::Cosine => "Cosine", Distance::Euclid => "Euclid", Distance::Dot => "Dot", + Distance::Manhattan => "Manhattan", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -753,6 +891,7 @@ impl Distance { "Cosine" => Some(Self::Cosine), "Euclid" => Some(Self::Euclid), "Dot" => Some(Self::Dot), + "Manhattan" => Some(Self::Manhattan), _ => None, } } @@ -896,6 +1035,34 @@ impl CompressionRatio { } #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] +pub enum ShardingMethod { + /// Auto-sharding based on record ids + Auto = 0, + /// Shard by user-defined key + Custom = 1, +} +impl ShardingMethod { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ShardingMethod::Auto => "Auto", + ShardingMethod::Custom => "Custom", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "Auto" => Some(Self::Auto), + "Custom" => Some(Self::Custom), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] pub enum TokenizerType { Unknown = 0, Prefix = 1, @@ -942,6 +1109,8 @@ pub enum ReplicaState { Initializing = 3, /// A shard which receives data, but is not used for search; Useful for backup shards Listener = 4, + /// Snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard + PartialSnapshot = 5, } impl ReplicaState { /// String value of the enum field names used in the ProtoBuf definition. @@ -955,6 +1124,7 @@ impl ReplicaState { ReplicaState::Partial => "Partial", ReplicaState::Initializing => "Initializing", ReplicaState::Listener => "Listener", + ReplicaState::PartialSnapshot => "PartialSnapshot", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -965,6 +1135,33 @@ impl ReplicaState { "Partial" => Some(Self::Partial), "Initializing" => Some(Self::Initializing), "Listener" => Some(Self::Listener), + "PartialSnapshot" => Some(Self::PartialSnapshot), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum ShardTransferMethod { + StreamRecords = 0, + Snapshot = 1, +} +impl ShardTransferMethod { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + ShardTransferMethod::StreamRecords => "StreamRecords", + ShardTransferMethod::Snapshot => "Snapshot", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. 
+ pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "StreamRecords" => Some(Self::StreamRecords), + "Snapshot" => Some(Self::Snapshot), _ => None, } } @@ -1317,6 +1514,60 @@ pub mod collections_client { ); self.inner.unary(req, path, codec).await } + /// + /// Create shard key + pub async fn create_shard_key( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/qdrant.Collections/CreateShardKey", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("qdrant.Collections", "CreateShardKey")); + self.inner.unary(req, path, codec).await + } + /// + /// Delete shard key + pub async fn delete_shard_key( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/qdrant.Collections/DeleteShardKey", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("qdrant.Collections", "DeleteShardKey")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -1416,6 +1667,24 @@ pub mod collections_server { tonic::Response, tonic::Status, >; + /// + /// Create shard key + async fn create_shard_key( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// + /// Delete shard key + async fn delete_shard_key( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } #[derive(Debug)] pub struct CollectionsServer { @@ -1949,6 +2218,98 @@ pub mod collections_server { }; Box::pin(fut) } + "/qdrant.Collections/CreateShardKey" => { + #[allow(non_camel_case_types)] + struct CreateShardKeySvc(pub Arc); + impl< + T: Collections, + > tonic::server::UnaryService + for CreateShardKeySvc { + type Response = super::CreateShardKeyResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).create_shard_key(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CreateShardKeySvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/qdrant.Collections/DeleteShardKey" => { + #[allow(non_camel_case_types)] + struct 
DeleteShardKeySvc(pub Arc); + impl< + T: Collections, + > tonic::server::UnaryService + for DeleteShardKeySvc { + type Response = super::DeleteShardKeyResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).delete_shard_key(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = DeleteShardKeySvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { Ok( @@ -2132,9 +2493,27 @@ pub mod point_id { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct SparseIndices { + #[prost(uint32, repeated, tag = "1")] + pub data: ::prost::alloc::vec::Vec, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct Vector { #[prost(float, repeated, tag = "1")] pub data: ::prost::alloc::vec::Vec, + #[prost(message, optional, tag = "2")] + pub indices: ::core::option::Option, +} +/// --------------------------------------------- +/// ----------------- ShardKeySelector ---------- +/// --------------------------------------------- +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ShardKeySelector { + /// List of shard keys which should be used in the request + #[prost(message, repeated, tag = "1")] + pub shard_keys: ::prost::alloc::vec::Vec, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2150,6 +2529,9 @@ pub struct UpsertPoints { /// Write ordering guarantees #[prost(message, optional, tag = "4")] pub ordering: ::core::option::Option, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "5")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2166,6 +2548,9 @@ pub struct DeletePoints { /// Write ordering guarantees #[prost(message, optional, tag = "4")] pub ordering: ::core::option::Option, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "5")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2185,6 +2570,9 @@ pub struct GetPoints { /// Options for specifying read consistency guarantees #[prost(message, optional, tag = "6")] pub read_consistency: ::core::option::Option, + /// Specify in which shards to look for the points, if not specified - look in all shards + #[prost(message, optional, tag = "7")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2201,6 +2589,9 
@@ pub struct UpdatePointVectors { /// Write ordering guarantees #[prost(message, optional, tag = "4")] pub ordering: ::core::option::Option, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "5")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2230,6 +2621,9 @@ pub struct DeletePointVectors { /// Write ordering guarantees #[prost(message, optional, tag = "5")] pub ordering: ::core::option::Option, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "6")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2249,6 +2643,9 @@ pub struct SetPayloadPoints { /// Write ordering guarantees #[prost(message, optional, tag = "6")] pub ordering: ::core::option::Option, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "7")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2268,6 +2665,9 @@ pub struct DeletePayloadPoints { /// Write ordering guarantees #[prost(message, optional, tag = "6")] pub ordering: ::core::option::Option, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "7")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2284,6 +2684,9 @@ pub struct ClearPayloadPoints { /// Write ordering guarantees #[prost(message, optional, tag = "4")] pub ordering: ::core::option::Option, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "5")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2487,6 +2890,14 @@ pub struct SearchPoints { /// Options for specifying read consistency guarantees #[prost(message, optional, tag = "12")] pub read_consistency: ::core::option::Option, + /// If set, overrides global timeout setting for this request. Unit is seconds. + #[prost(uint64, optional, tag = "13")] + pub timeout: ::core::option::Option, + /// Specify in which shards to look for the points, if not specified - look in all shards + #[prost(message, optional, tag = "14")] + pub shard_key_selector: ::core::option::Option, + #[prost(message, optional, tag = "15")] + pub sparse_indices: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2499,6 +2910,9 @@ pub struct SearchBatchPoints { /// Options for specifying read consistency guarantees #[prost(message, optional, tag = "3")] pub read_consistency: ::core::option::Option, + /// If set, overrides global timeout setting for this request. Unit is seconds. + #[prost(uint64, optional, tag = "4")] + pub timeout: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2555,6 +2969,14 @@ pub struct SearchPointGroups { /// Options for specifying how to use the group id to lookup points in another collection #[prost(message, optional, tag = "13")] pub with_lookup: ::core::option::Option, + /// If set, overrides global timeout setting for this request. Unit is seconds. 
+ #[prost(uint64, optional, tag = "14")] + pub timeout: ::core::option::Option, + /// Specify in which shards to look for the points, if not specified - look in all shards + #[prost(message, optional, tag = "15")] + pub shard_key_selector: ::core::option::Option, + #[prost(message, optional, tag = "16")] + pub sparse_indices: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2579,6 +3001,9 @@ pub struct ScrollPoints { /// Options for specifying read consistency guarantees #[prost(message, optional, tag = "8")] pub read_consistency: ::core::option::Option, + /// Specify in which shards to look for the points, if not specified - look in all shards + #[prost(message, optional, tag = "9")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2588,6 +3013,9 @@ pub struct LookupLocation { /// Which vector to use for search, if not specified - use default vector #[prost(string, optional, tag = "2")] pub vector_name: ::core::option::Option<::prost::alloc::string::String>, + /// Specify in which shards to look for the points, if not specified - look in all shards + #[prost(message, optional, tag = "3")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2640,6 +3068,12 @@ pub struct RecommendPoints { /// Try to avoid vectors like this #[prost(message, repeated, tag = "18")] pub negative_vectors: ::prost::alloc::vec::Vec, + /// If set, overrides global timeout setting for this request. Unit is seconds. + #[prost(uint64, optional, tag = "19")] + pub timeout: ::core::option::Option, + /// Specify in which shards to look for the points, if not specified - look in all shards + #[prost(message, optional, tag = "20")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2652,6 +3086,9 @@ pub struct RecommendBatchPoints { /// Options for specifying read consistency guarantees #[prost(message, optional, tag = "3")] pub read_consistency: ::core::option::Option, + /// If set, overrides global timeout setting for this request. Unit is seconds. + #[prost(uint64, optional, tag = "4")] + pub timeout: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2710,6 +3147,113 @@ pub struct RecommendPointGroups { /// Try to avoid vectors like this #[prost(message, repeated, tag = "19")] pub negative_vectors: ::prost::alloc::vec::Vec, + /// If set, overrides global timeout setting for this request. Unit is seconds. + #[prost(uint64, optional, tag = "20")] + pub timeout: ::core::option::Option, + /// Specify in which shards to look for the points, if not specified - look in all shards + #[prost(message, optional, tag = "21")] + pub shard_key_selector: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct TargetVector { + #[prost(oneof = "target_vector::Target", tags = "1")] + pub target: ::core::option::Option, +} +/// Nested message and enum types in `TargetVector`. 
+pub mod target_vector { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Target { + #[prost(message, tag = "1")] + Single(super::VectorExample), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct VectorExample { + #[prost(oneof = "vector_example::Example", tags = "1, 2")] + pub example: ::core::option::Option, +} +/// Nested message and enum types in `VectorExample`. +pub mod vector_example { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Example { + #[prost(message, tag = "1")] + Id(super::PointId), + #[prost(message, tag = "2")] + Vector(super::Vector), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct ContextExamplePair { + #[prost(message, optional, tag = "1")] + pub positive: ::core::option::Option, + #[prost(message, optional, tag = "2")] + pub negative: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DiscoverPoints { + /// name of the collection + #[prost(string, tag = "1")] + pub collection_name: ::prost::alloc::string::String, + /// Use this as the primary search objective + #[prost(message, optional, tag = "2")] + pub target: ::core::option::Option, + /// Search will be constrained by these pairs of examples + #[prost(message, repeated, tag = "3")] + pub context: ::prost::alloc::vec::Vec, + /// Filter conditions - return only those points that satisfy the specified conditions + #[prost(message, optional, tag = "4")] + pub filter: ::core::option::Option, + /// Max number of result + #[prost(uint64, tag = "5")] + pub limit: u64, + /// Options for specifying which payload to include or not + #[prost(message, optional, tag = "6")] + pub with_payload: ::core::option::Option, + /// Search config + #[prost(message, optional, tag = "7")] + pub params: ::core::option::Option, + /// Offset of the result + #[prost(uint64, optional, tag = "8")] + pub offset: ::core::option::Option, + /// Define which vector to use for recommendation, if not specified - default vector + #[prost(string, optional, tag = "9")] + pub using: ::core::option::Option<::prost::alloc::string::String>, + /// Options for specifying which vectors to include into response + #[prost(message, optional, tag = "10")] + pub with_vectors: ::core::option::Option, + /// Name of the collection to use for points lookup, if not specified - use current collection + #[prost(message, optional, tag = "11")] + pub lookup_from: ::core::option::Option, + /// Options for specifying read consistency guarantees + #[prost(message, optional, tag = "12")] + pub read_consistency: ::core::option::Option, + /// If set, overrides global timeout setting for this request. Unit is seconds. 
+ #[prost(uint64, optional, tag = "13")] + pub timeout: ::core::option::Option, + /// Specify in which shards to look for the points, if not specified - look in all shards + #[prost(message, optional, tag = "14")] + pub shard_key_selector: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct DiscoverBatchPoints { + /// Name of the collection + #[prost(string, tag = "1")] + pub collection_name: ::prost::alloc::string::String, + #[prost(message, repeated, tag = "2")] + pub discover_points: ::prost::alloc::vec::Vec, + /// Options for specifying read consistency guarantees + #[prost(message, optional, tag = "3")] + pub read_consistency: ::core::option::Option, + /// If set, overrides global timeout setting for this request. Unit is seconds. + #[prost(uint64, optional, tag = "4")] + pub timeout: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2723,13 +3267,19 @@ pub struct CountPoints { /// If `true` - return exact count, if `false` - return approximate count #[prost(bool, optional, tag = "3")] pub exact: ::core::option::Option, + /// Options for specifying read consistency guarantees + #[prost(message, optional, tag = "4")] + pub read_consistency: ::core::option::Option, + /// Specify in which shards to look for the points, if not specified - look in all shards + #[prost(message, optional, tag = "5")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] pub struct PointsUpdateOperation { #[prost( oneof = "points_update_operation::Operation", - tags = "1, 2, 3, 4, 5, 6, 7, 8" + tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10" )] pub operation: ::core::option::Option, } @@ -2740,6 +3290,9 @@ pub mod points_update_operation { pub struct PointStructList { #[prost(message, repeated, tag = "1")] pub points: ::prost::alloc::vec::Vec, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "2")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2752,6 +3305,9 @@ pub mod points_update_operation { /// Affected points #[prost(message, optional, tag = "2")] pub points_selector: ::core::option::Option, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "3")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2761,6 +3317,9 @@ pub mod points_update_operation { /// Affected points #[prost(message, optional, tag = "2")] pub points_selector: ::core::option::Option, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "3")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2768,6 +3327,9 @@ pub mod points_update_operation { /// List of points and vectors to update #[prost(message, repeated, tag = "1")] pub points: ::prost::alloc::vec::Vec, + /// Option for custom sharding to specify used shard keys + #[prost(message, optional, tag = "2")] + pub shard_key_selector: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2778,6 +3340,29 @@ pub mod points_update_operation { /// List of vector names to 
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
@@ -2723,13 +3267,19 @@ pub struct CountPoints {
     /// If `true` - return exact count, if `false` - return approximate count
     #[prost(bool, optional, tag = "3")]
     pub exact: ::core::option::Option<bool>,
+    /// Options for specifying read consistency guarantees
+    #[prost(message, optional, tag = "4")]
+    pub read_consistency: ::core::option::Option<ReadConsistency>,
+    /// Specify in which shards to look for the points, if not specified - look in all shards
+    #[prost(message, optional, tag = "5")]
+    pub shard_key_selector: ::core::option::Option<ShardKeySelector>,
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct PointsUpdateOperation {
     #[prost(
         oneof = "points_update_operation::Operation",
-        tags = "1, 2, 3, 4, 5, 6, 7, 8"
+        tags = "1, 2, 3, 4, 5, 6, 7, 8, 9, 10"
     )]
     pub operation: ::core::option::Option<points_update_operation::Operation>,
 }
@@ -2740,6 +3290,9 @@ pub mod points_update_operation {
     pub struct PointStructList {
         #[prost(message, repeated, tag = "1")]
         pub points: ::prost::alloc::vec::Vec<super::PointStruct>,
+        /// Option for custom sharding to specify used shard keys
+        #[prost(message, optional, tag = "2")]
+        pub shard_key_selector: ::core::option::Option<super::ShardKeySelector>,
     }
     #[allow(clippy::derive_partial_eq_without_eq)]
     #[derive(Clone, PartialEq, ::prost::Message)]
@@ -2752,6 +3305,9 @@ pub mod points_update_operation {
         /// Affected points
         #[prost(message, optional, tag = "2")]
         pub points_selector: ::core::option::Option<super::PointsSelector>,
+        /// Option for custom sharding to specify used shard keys
+        #[prost(message, optional, tag = "3")]
+        pub shard_key_selector: ::core::option::Option<super::ShardKeySelector>,
     }
     #[allow(clippy::derive_partial_eq_without_eq)]
     #[derive(Clone, PartialEq, ::prost::Message)]
@@ -2761,6 +3317,9 @@ pub mod points_update_operation {
         /// Affected points
         #[prost(message, optional, tag = "2")]
         pub points_selector: ::core::option::Option<super::PointsSelector>,
+        /// Option for custom sharding to specify used shard keys
+        #[prost(message, optional, tag = "3")]
+        pub shard_key_selector: ::core::option::Option<super::ShardKeySelector>,
     }
     #[allow(clippy::derive_partial_eq_without_eq)]
     #[derive(Clone, PartialEq, ::prost::Message)]
@@ -2768,6 +3327,9 @@ pub mod points_update_operation {
         /// List of points and vectors to update
         #[prost(message, repeated, tag = "1")]
         pub points: ::prost::alloc::vec::Vec<super::PointVectors>,
+        /// Option for custom sharding to specify used shard keys
+        #[prost(message, optional, tag = "2")]
+        pub shard_key_selector: ::core::option::Option<super::ShardKeySelector>,
     }
     #[allow(clippy::derive_partial_eq_without_eq)]
     #[derive(Clone, PartialEq, ::prost::Message)]
@@ -2778,6 +3340,29 @@ pub mod points_update_operation {
         /// List of vector names to delete
         #[prost(message, optional, tag = "2")]
         pub vectors: ::core::option::Option<super::VectorsSelector>,
+        /// Option for custom sharding to specify used shard keys
+        #[prost(message, optional, tag = "3")]
+        pub shard_key_selector: ::core::option::Option<super::ShardKeySelector>,
+    }
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct DeletePoints {
+        /// Affected points
+        #[prost(message, optional, tag = "1")]
+        pub points: ::core::option::Option<super::PointsSelector>,
+        /// Option for custom sharding to specify used shard keys
+        #[prost(message, optional, tag = "2")]
+        pub shard_key_selector: ::core::option::Option<super::ShardKeySelector>,
+    }
+    #[allow(clippy::derive_partial_eq_without_eq)]
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct ClearPayload {
+        /// Affected points
+        #[prost(message, optional, tag = "1")]
+        pub points: ::core::option::Option<super::PointsSelector>,
+        /// Option for custom sharding to specify used shard keys
+        #[prost(message, optional, tag = "2")]
+        pub shard_key_selector: ::core::option::Option<super::ShardKeySelector>,
     }
     #[allow(clippy::derive_partial_eq_without_eq)]
     #[derive(Clone, PartialEq, ::prost::Oneof)]
@@ -2785,7 +3370,7 @@ pub mod points_update_operation {
         #[prost(message, tag = "1")]
         Upsert(PointStructList),
         #[prost(message, tag = "2")]
-        Delete(super::PointsSelector),
+        DeleteDeprecated(super::PointsSelector),
         #[prost(message, tag = "3")]
         SetPayload(SetPayload),
         #[prost(message, tag = "4")]
@@ -2793,11 +3378,15 @@ pub mod points_update_operation {
         #[prost(message, tag = "5")]
         DeletePayload(DeletePayload),
         #[prost(message, tag = "6")]
-        ClearPayload(super::PointsSelector),
+        ClearPayloadDeprecated(super::PointsSelector),
         #[prost(message, tag = "7")]
         UpdateVectors(UpdateVectors),
         #[prost(message, tag = "8")]
         DeleteVectors(DeleteVectors),
+        #[prost(message, tag = "9")]
+        DeletePoints(DeletePoints),
+        #[prost(message, tag = "10")]
+        ClearPayload(ClearPayload),
     }
 }
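The new oneof variants wrap the old bare selectors in messages that can carry a shard key. A hedged sketch of a delete routed to a single custom shard, assuming `PointsSelector`, `PointsIdsList`, and `ShardKeySelector` keep the shapes they have elsewhere in this generated file; "tenant_a" is a placeholder key:

use qdrant_client::qdrant::{
    points_selector::PointsSelectorOneOf,
    points_update_operation::{DeletePoints, Operation},
    shard_key::Key, PointId, PointsIdsList, PointsSelector,
    PointsUpdateOperation, ShardKey, ShardKeySelector,
};

fn delete_from_one_shard(ids: Vec<PointId>) -> PointsUpdateOperation {
    PointsUpdateOperation {
        // `DeletePoints` supersedes the bare selector variant, which the
        // diff keeps around as `DeleteDeprecated` for wire compatibility.
        operation: Some(Operation::DeletePoints(DeletePoints {
            points: Some(PointsSelector {
                points_selector_one_of: Some(PointsSelectorOneOf::Points(
                    PointsIdsList { ids },
                )),
            }),
            // Route the operation to shards tagged "tenant_a" only.
            shard_key_selector: Some(ShardKeySelector {
                shard_keys: vec![ShardKey {
                    key: Some(Key::Keyword("tenant_a".to_string())),
                }],
            }),
        })),
    }
}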
 #[allow(clippy::derive_partial_eq_without_eq)]
@@ -2828,8 +3417,8 @@ pub struct PointsOperationResponse {
 #[derive(Clone, PartialEq, ::prost::Message)]
 pub struct UpdateResult {
     /// Number of operation
-    #[prost(uint64, tag = "1")]
-    pub operation_id: u64,
+    #[prost(uint64, optional, tag = "1")]
+    pub operation_id: ::core::option::Option<u64>,
     /// Operation status
     #[prost(enumeration = "UpdateStatus", tag = "2")]
     pub status: i32,
@@ -2852,6 +3441,9 @@ pub struct ScoredPoint {
     /// Vectors to search
     #[prost(message, optional, tag = "6")]
     pub vectors: ::core::option::Option<Vectors>,
+    /// Shard key
+    #[prost(message, optional, tag = "7")]
+    pub shard_key: ::core::option::Option<ShardKey>,
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
@@ -2964,6 +3556,9 @@ pub struct RetrievedPoint {
     pub payload: ::std::collections::HashMap<::prost::alloc::string::String, Value>,
     #[prost(message, optional, tag = "4")]
     pub vectors: ::core::option::Option<Vectors>,
+    /// Shard key
+    #[prost(message, optional, tag = "5")]
+    pub shard_key: ::core::option::Option<ShardKey>,
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
@@ -2994,6 +3589,24 @@ pub struct RecommendBatchResponse {
 }
 #[allow(clippy::derive_partial_eq_without_eq)]
 #[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DiscoverResponse {
+    #[prost(message, repeated, tag = "1")]
+    pub result: ::prost::alloc::vec::Vec<ScoredPoint>,
+    /// Time spent to process
+    #[prost(double, tag = "2")]
+    pub time: f64,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct DiscoverBatchResponse {
+    #[prost(message, repeated, tag = "1")]
+    pub result: ::prost::alloc::vec::Vec<BatchResult>,
+    /// Time spent to process
+    #[prost(double, tag = "2")]
+    pub time: f64,
+}
+#[allow(clippy::derive_partial_eq_without_eq)]
+#[derive(Clone, PartialEq, ::prost::Message)]
 pub struct RecommendGroupsResponse {
     #[prost(message, optional, tag = "1")]
     pub result: ::core::option::Option<GroupsResult>,
@@ -3961,6 +4574,71 @@ pub mod points_client {
         self.inner.unary(req, path, codec).await
     }
     ///
+        /// Use context and a target to find the most similar points to the target, constrained by the context.
+        ///
+        /// When using only the context (without a target), a special search - called context search - is performed where
+        /// pairs of points are used to generate a loss that guides the search towards the zone where
+        /// most positive examples overlap. This means that the score minimizes the scenario of
+        /// finding a point closer to a negative than to a positive part of a pair.
+        ///
+        /// Since the score of a context relates to loss, the maximum score a point can get is 0.0,
+        /// and it becomes normal that many points can have a score of 0.0.
+        ///
+        /// When using a target (with or without context), the score behaves a little differently: the
+        /// integer part of the score represents the rank with respect to the context, while the
+        /// decimal part of the score relates to the distance to the target. The context part of the score
+        /// contributes +1 for each pair where the point is closer to the positive than to the negative example,
+        /// and -1 otherwise.
+        pub async fn discover(
+            &mut self,
+            request: impl tonic::IntoRequest<super::DiscoverPoints>,
+        ) -> std::result::Result<
+            tonic::Response<super::DiscoverResponse>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static("/qdrant.Points/Discover");
+            let mut req = request.into_request();
+            req.extensions_mut().insert(GrpcMethod::new("qdrant.Points", "Discover"));
+            self.inner.unary(req, path, codec).await
+        }
+        ///
+        /// Batch request points based on { positive, negative } pairs of examples, and/or a target
+        pub async fn discover_batch(
+            &mut self,
+            request: impl tonic::IntoRequest<super::DiscoverBatchPoints>,
+        ) -> std::result::Result<
+            tonic::Response<super::DiscoverBatchResponse>,
+            tonic::Status,
+        > {
+            self.inner
+                .ready()
+                .await
+                .map_err(|e| {
+                    tonic::Status::new(
+                        tonic::Code::Unknown,
+                        format!("Service was not ready: {}", e.into()),
+                    )
+                })?;
+            let codec = tonic::codec::ProstCodec::default();
+            let path = http::uri::PathAndQuery::from_static(
+                "/qdrant.Points/DiscoverBatch",
+            );
+            let mut req = request.into_request();
+            req.extensions_mut()
+                .insert(GrpcMethod::new("qdrant.Points", "DiscoverBatch"));
+            self.inner.unary(req, path, codec).await
+        }
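Calling the new unary method from the generated client is straightforward. A sketch assuming a Qdrant node on localhost:6334, tonic's transport feature, and the `build_discover_request` helper from the earlier sketch:

use qdrant_client::qdrant::points_client::PointsClient;

async fn run_discover() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = PointsClient::connect("http://localhost:6334").await?;
    let response = client.discover(build_discover_request()).await?.into_inner();
    for point in response.result {
        // With a target set, the integer part of `score` is the context rank
        // and the fractional part reflects closeness to the target.
        println!("{:?} -> {}", point.id, point.score);
    }
    Ok(())
}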
+        ///
         /// Count points in collection with given filtering conditions
         pub async fn count(
             &mut self,
@@ -4170,6 +4848,38 @@ pub mod points_server {
             tonic::Status,
         >;
         ///
+        /// Use context and a target to find the most similar points to the target, constrained by the context.
+        ///
+        /// When using only the context (without a target), a special search - called context search - is performed where
+        /// pairs of points are used to generate a loss that guides the search towards the zone where
+        /// most positive examples overlap. This means that the score minimizes the scenario of
+        /// finding a point closer to a negative than to a positive part of a pair.
+        ///
+        /// Since the score of a context relates to loss, the maximum score a point can get is 0.0,
+        /// and it becomes normal that many points can have a score of 0.0.
+        ///
+        /// When using a target (with or without context), the score behaves a little differently: the
+        /// integer part of the score represents the rank with respect to the context, while the
+        /// decimal part of the score relates to the distance to the target. The context part of the score
+        /// contributes +1 for each pair where the point is closer to the positive than to the negative example,
+        /// and -1 otherwise.
+        async fn discover(
+            &self,
+            request: tonic::Request<super::DiscoverPoints>,
+        ) -> std::result::Result<
+            tonic::Response<super::DiscoverResponse>,
+            tonic::Status,
+        >;
+        ///
+        /// Batch request points based on { positive, negative } pairs of examples, and/or a target
+        async fn discover_batch(
+            &self,
+            request: tonic::Request<super::DiscoverBatchPoints>,
+        ) -> std::result::Result<
+            tonic::Response<super::DiscoverBatchResponse>,
+            tonic::Status,
+        >;
+        ///
         /// Count points in collection with given filtering conditions
         async fn count(
             &self,
@@ -5058,6 +5768,94 @@ pub mod points_server {
                 };
                 Box::pin(fut)
             }
+            "/qdrant.Points/Discover" => {
+                #[allow(non_camel_case_types)]
+                struct DiscoverSvc<T: Points>(pub Arc<T>);
+                impl<T: Points> tonic::server::UnaryService<super::DiscoverPoints>
+                for DiscoverSvc<T> {
+                    type Response = super::DiscoverResponse;
+                    type Future = BoxFuture<
+                        tonic::Response<Self::Response>,
+                        tonic::Status,
+                    >;
+                    fn call(
+                        &mut self,
+                        request: tonic::Request<super::DiscoverPoints>,
+                    ) -> Self::Future {
+                        let inner = Arc::clone(&self.0);
+                        let fut = async move { (*inner).discover(request).await };
+                        Box::pin(fut)
+                    }
+                }
+                let accept_compression_encodings = self.accept_compression_encodings;
+                let send_compression_encodings = self.send_compression_encodings;
+                let max_decoding_message_size = self.max_decoding_message_size;
+                let max_encoding_message_size = self.max_encoding_message_size;
+                let inner = self.inner.clone();
+                let fut = async move {
+                    let inner = inner.0;
+                    let method = DiscoverSvc(inner);
+                    let codec = tonic::codec::ProstCodec::default();
+                    let mut grpc = tonic::server::Grpc::new(codec)
+                        .apply_compression_config(
+                            accept_compression_encodings,
+                            send_compression_encodings,
+                        )
+                        .apply_max_message_size_config(
+                            max_decoding_message_size,
+                            max_encoding_message_size,
+                        );
+                    let res = grpc.unary(method, req).await;
+                    Ok(res)
+                };
+                Box::pin(fut)
+            }
+            "/qdrant.Points/DiscoverBatch" => {
+                #[allow(non_camel_case_types)]
+                struct DiscoverBatchSvc<T: Points>(pub Arc<T>);
+                impl<
+                    T: Points,
+                > tonic::server::UnaryService<super::DiscoverBatchPoints>
+                for DiscoverBatchSvc<T> {
+                    type Response = super::DiscoverBatchResponse;
+                    type Future = BoxFuture<
+                        tonic::Response<Self::Response>,
+                        tonic::Status,
+                    >;
+                    fn call(
+                        &mut self,
+                        request: tonic::Request<super::DiscoverBatchPoints>,
+                    ) -> Self::Future {
+                        let inner = Arc::clone(&self.0);
+                        let fut = async move {
+                            (*inner).discover_batch(request).await
+                        };
+                        Box::pin(fut)
+                    }
+                }
+                let accept_compression_encodings = self.accept_compression_encodings;
+                let send_compression_encodings = self.send_compression_encodings;
+                let max_decoding_message_size = self.max_decoding_message_size;
+                let max_encoding_message_size = self.max_encoding_message_size;
+                let inner = self.inner.clone();
+                let fut = async move {
+                    let inner = inner.0;
+                    let method = DiscoverBatchSvc(inner);
+                    let codec = tonic::codec::ProstCodec::default();
+                    let mut grpc = tonic::server::Grpc::new(codec)
+                        .apply_compression_config(
+                            accept_compression_encodings,
+                            send_compression_encodings,
+                        )
+                        .apply_max_message_size_config(
+                            max_decoding_message_size,
+                            max_encoding_message_size,
+                        );
+                    let res = grpc.unary(method, req).await;
+                    Ok(res)
+                };
+                Box::pin(fut)
+            }
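From the client side, the server plumbing above is exercised through `discover_batch`. A sketch reusing `build_discover_request` from the earlier example; the endpoint URL and ten-second timeout are illustrative, and `BatchResult` is assumed to keep the shape it has for the other batch responses in this file:

use qdrant_client::qdrant::{points_client::PointsClient, DiscoverBatchPoints};

async fn run_discover_batch() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = PointsClient::connect("http://localhost:6334").await?;
    let batch = DiscoverBatchPoints {
        collection_name: "my_collection".to_string(),
        discover_points: vec![build_discover_request(), build_discover_request()],
        timeout: Some(10), // seconds; overrides the server default for this call
        ..Default::default()
    };
    // One gRPC round-trip; per-request results come back in request order.
    for batch_result in client.discover_batch(batch).await?.into_inner().result {
        println!("{} hits", batch_result.result.len());
    }
    Ok(())
}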
             "/qdrant.Points/Count" => {
                 #[allow(non_camel_case_types)]
                 struct CountSvc<T: Points>(pub Arc<T>);
@@ -5394,7 +6192,7 @@ pub mod snapshots_client {
             self.inner.unary(req, path, codec).await
         }
         ///
-        /// Delete collection snapshots
+        /// Delete collection snapshot
         pub async fn delete(
             &mut self,
             request: impl tonic::IntoRequest<super::DeleteSnapshotRequest>,
@@ -5471,7 +6269,7 @@ pub mod snapshots_client {
             self.inner.unary(req, path, codec).await
         }
         ///
-        /// List full storage snapshots
+        /// Delete full storage snapshot
        pub async fn delete_full(
             &mut self,
             request: impl tonic::IntoRequest<super::DeleteFullSnapshotRequest>,
@@ -5525,7 +6323,7 @@ pub mod snapshots_server {
             tonic::Status,
         >;
         ///
-        /// Delete collection snapshots
+        /// Delete collection snapshot
         async fn delete(
             &self,
             request: tonic::Request<super::DeleteSnapshotRequest>,
@@ -5552,7 +6350,7 @@ pub mod snapshots_server {
             tonic::Status,
         >;
         ///
-        /// List full storage snapshots
+        /// Delete full storage snapshot
         async fn delete_full(
             &self,
             request: tonic::Request<super::DeleteFullSnapshotRequest>,
diff --git a/tools/sync_proto.sh b/tools/sync_proto.sh
index 7454fd9..c48f8bf 100755
--- a/tools/sync_proto.sh
+++ b/tools/sync_proto.sh
@@ -25,6 +25,7 @@
 rm $CLIENT_DIR/collections_internal_service.proto
 rm $CLIENT_DIR/qdrant_internal_service.proto
 rm $CLIENT_DIR/raft_service.proto
+rm $CLIENT_DIR/shard_snapshots_service.proto
 rm $CLIENT_DIR/health_check.proto

 cat $CLIENT_DIR/qdrant.proto \
@@ -32,6 +33,7 @@ cat $CLIENT_DIR/qdrant.proto \
   | grep -v 'points_internal_service.proto' \
   | grep -v 'qdrant_internal_service.proto' \
   | grep -v 'raft_service.proto' \
+  | grep -v 'shard_snapshots_service.proto' \
   | grep -v 'health_check.proto' \
   > $CLIENT_DIR/qdrant_tmp.proto