From 693f36ea586c59377ecd12b0362c542697a563dd Mon Sep 17 00:00:00 2001 From: Arnaud Gourlay Date: Fri, 1 Mar 2024 14:45:59 +0100 Subject: [PATCH 01/10] Update v1.8.0 --- Cargo.toml | 2 +- proto/collections.proto | 67 ++++++-- proto/collections_service.proto | 14 +- proto/points.proto | 51 ++++++- proto/points_service.proto | 50 +++--- proto/qdrant.proto | 1 + proto/snapshots_service.proto | 11 +- src/qdrant.rs | 261 +++++++++++++++++++++++++++++++- tests/integration-tests.sh | 2 +- 9 files changed, 397 insertions(+), 62 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 607087b..7d91954 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "qdrant-client" -version = "1.6.0" +version = "1.7.0" edition = "2021" authors = ["Qdrant Team "] description = "Rust client for Qdrant Vector Search Engine" diff --git a/proto/collections.proto b/proto/collections.proto index 765ffcb..b2a7880 100644 --- a/proto/collections.proto +++ b/proto/collections.proto @@ -50,6 +50,19 @@ message GetCollectionInfoRequest { string collection_name = 1; // Name of the collection } +message CollectionExistsRequest { + string collection_name = 1; +} + +message CollectionExists { + bool exists = 1; +} + +message CollectionExistsResponse { + CollectionExists result = 1; + double time = 2; // Time spent to process +} + message ListCollectionsRequest { } @@ -90,6 +103,7 @@ enum PayloadSchemaType { Geo = 4; Text = 5; Bool = 6; + Datetime = 7; } enum QuantizationType { @@ -113,7 +127,7 @@ message OptimizerStatus { message HnswConfigDiff { /* Number of edges per node in the index graph. Larger the value - more accurate the search, more space required. - */ + */ optional uint64 m = 1; /* Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index. @@ -127,16 +141,19 @@ message HnswConfigDiff { */ optional uint64 full_scan_threshold = 3; /* - Number of parallel threads used for background index building. If 0 - auto selection. - */ + Number of parallel threads used for background index building. + If 0 - automatically select from 8 to 16. + Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs. + On small CPUs, less threads are used. + */ optional uint64 max_indexing_threads = 4; /* Store HNSW index on disk. If set to false, the index will be stored in RAM. - */ + */ optional bool on_disk = 5; /* - Number of additional payload-aware links per node in the index graph. If not set - regular M parameter will be used. - */ + Number of additional payload-aware links per node in the index graph. If not set - regular M parameter will be used. + */ optional uint64 payload_m = 6; } @@ -160,11 +177,11 @@ message WalConfigDiff { message OptimizersConfigDiff { /* The minimal fraction of deleted vectors in a segment, required to perform segment optimization - */ + */ optional double deleted_threshold = 1; /* The minimal number of vectors in a segment, required to perform segment optimization - */ + */ optional uint64 vacuum_min_vector_number = 2; /* Target amount of segments the optimizer will try to keep. @@ -181,7 +198,7 @@ message OptimizersConfigDiff { Do not create segments larger this size (in kilobytes). Large segments might require disproportionately long indexation times, therefore it makes sense to limit the size of segments. - + If indexing speed is more important - make this parameter lower. If search speed is more important - make this parameter higher. 
Note: 1Kb = 1 vector of size 256 @@ -201,11 +218,11 @@ message OptimizersConfigDiff { optional uint64 memmap_threshold = 5; /* Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing - + Default value is 20,000, based on . - + To disable vector indexing, set to `0`. - + Note: 1kB = 1 vector of size 256. */ optional uint64 indexing_threshold = 6; @@ -214,7 +231,10 @@ message OptimizersConfigDiff { */ optional uint64 flush_interval_sec = 7; /* - Max number of threads, which can be used for optimization. If 0 - `NUM_CPU - 1` will be used + Max number of threads (jobs) for running optimizations per shard. + Note: each optimization job will also use `max_indexing_threads` threads by itself for index building. + If null - have no limit and choose dynamically to saturate CPU. + If 0 - no optimization threads, optimizations will be disabled. */ optional uint64 max_optimization_threads = 8; } @@ -343,9 +363,15 @@ message TextIndexParams { optional uint64 max_token_len = 4; // Maximal token length } +message IntegerIndexParams { + bool lookup = 1; // If true - support direct lookups. + bool range = 2; // If true - support ranges filters. +} + message PayloadIndexParams { oneof index_params { TextIndexParams text_index_params = 1; // Parameters for text index + IntegerIndexParams integer_index_params = 2; // Parameters for integer index } } @@ -423,6 +449,8 @@ enum ReplicaState { Initializing = 3; // Collection is being created Listener = 4; // A shard which receives data, but is not used for search; Useful for backup shards PartialSnapshot = 5; // Snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard + Recovery = 6; // Shard is undergoing recovered by an external node; Normally rejects updates, accepts updates if force is true + // TODO(1.9): deprecate PartialSnapshot state } message ShardKey { @@ -468,9 +496,17 @@ message MoveShard { optional ShardTransferMethod method = 4; } +message RestartTransfer { + uint32 shard_id = 1; // Local shard id + uint64 from_peer_id = 2; + uint64 to_peer_id = 3; + ShardTransferMethod method = 4; +} + enum ShardTransferMethod { - StreamRecords = 0; - Snapshot = 1; + StreamRecords = 0; // Stream shard records in batches + Snapshot = 1; // Snapshot the shard and recover it on the target peer + WalDelta = 2; // Resolve WAL delta between peers and transfer the difference } message Replica { @@ -498,6 +534,7 @@ message UpdateCollectionClusterSetupRequest { Replica drop_replica = 5; CreateShardKey create_shard_key = 7; DeleteShardKey delete_shard_key = 8; + RestartTransfer restart_transfer = 9; } optional uint64 timeout = 6; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied } diff --git a/proto/collections_service.proto b/proto/collections_service.proto index 723124e..52caa8d 100644 --- a/proto/collections_service.proto +++ b/proto/collections_service.proto @@ -8,23 +8,23 @@ option csharp_namespace = "Qdrant.Client.Grpc"; service Collections { /* Get detailed information about specified existing collection - */ + */ rpc Get (GetCollectionInfoRequest) returns (GetCollectionInfoResponse) {} /* Get list name of all existing collections - */ + */ rpc List (ListCollectionsRequest) returns (ListCollectionsResponse) {} /* Create new collection with given parameters - */ + */ rpc Create (CreateCollection) returns (CollectionOperationResponse) {} /* Update parameters of the existing collection - */ + */ rpc Update 
(UpdateCollection) returns (CollectionOperationResponse) {} /* Drop collection and all associated data - */ + */ rpc Delete (DeleteCollection) returns (CollectionOperationResponse) {} /* Update Aliases of the existing collection @@ -43,6 +43,10 @@ service Collections { */ rpc CollectionClusterInfo (CollectionClusterInfoRequest) returns (CollectionClusterInfoResponse) {} /* + Check the existence of a collection + */ + rpc CollectionExists (CollectionExistsRequest) returns (CollectionExistsResponse) {} + /* Update cluster setup for a collection */ rpc UpdateCollectionClusterSetup (UpdateCollectionClusterSetupRequest) returns (UpdateCollectionClusterSetupResponse) {} diff --git a/proto/points.proto b/proto/points.proto index f95a4d3..71d3b2b 100644 --- a/proto/points.proto +++ b/proto/points.proto @@ -3,8 +3,9 @@ syntax = "proto3"; package qdrant; option csharp_namespace = "Qdrant.Client.Grpc"; -import "json_with_int.proto"; import "collections.proto"; +import "google/protobuf/timestamp.proto"; +import "json_with_int.proto"; enum WriteOrderingType { @@ -118,6 +119,7 @@ message SetPayloadPoints { optional PointsSelector points_selector = 5; // Affected points optional WriteOrdering ordering = 6; // Write ordering guarantees optional ShardKeySelector shard_key_selector = 7; // Option for custom sharding to specify used shard keys + optional string key = 8; // Option for indicate property of payload } message DeletePayloadPoints { @@ -145,6 +147,7 @@ enum FieldType { FieldTypeGeo = 3; FieldTypeText = 4; FieldTypeBool = 5; + FieldTypeDatetime = 6; } message CreateFieldIndexCollection { @@ -204,12 +207,12 @@ message WithVectorsSelector { message QuantizationSearchParams { /* If set to true, search will ignore quantized vector data - */ + */ optional bool ignore = 1; /* If true, use original vectors to re-score top-k results. If ignored, qdrant decides automatically does rescore enabled or not. - */ + */ optional bool rescore = 2; /* @@ -220,7 +223,7 @@ message QuantizationSearchParams { For example, if `oversampling` is 2.4 and `limit` is 100, then 240 vectors will be pre-selected using quantized index, and then top-100 will be returned after re-scoring. - */ + */ optional double oversampling = 3; } @@ -228,7 +231,7 @@ message SearchParams { /* Params relevant to HNSW index. Size of the beam in a beam-search. Larger the value - more accurate the result, more time required for search. - */ + */ optional uint64 hnsw_ef = 1; /* @@ -244,7 +247,7 @@ message SearchParams { If enabled, the engine will only perform search among indexed or small segments. 
Using this option prevents slow searches in case of delayed index, but does not guarantee that all uploaded vectors will be included in search results - */ + */ optional bool indexed_only = 4; } @@ -299,6 +302,26 @@ message SearchPointGroups { optional SparseIndices sparse_indices = 16; } +enum Direction { + Asc = 0; + Desc = 1; +} + +message StartFrom { + oneof value { + double float = 1; + int64 integer = 2; + google.protobuf.Timestamp timestamp = 3; + string datetime = 4; + } +} + +message OrderBy { + string key = 1; // Payload key to order by + optional Direction direction = 2; // Ascending or descending order + optional StartFrom start_from = 3; // Start from this value +} + message ScrollPoints { string collection_name = 1; Filter filter = 2; // Filter conditions - return only those points that satisfy the specified conditions @@ -309,6 +332,7 @@ message ScrollPoints { optional WithVectorsSelector with_vectors = 7; // Options for specifying which vectors to include into response optional ReadConsistency read_consistency = 8; // Options for specifying read consistency guarantees optional ShardKeySelector shard_key_selector = 9; // Specify in which shards to look for the points, if not specified - look in all shards + optional OrderBy order_by = 10; // Order the records by a payload field } // How to use positive and negative vectors to find the results, default is `AverageVector`: @@ -442,6 +466,7 @@ message PointsUpdateOperation { map payload = 1; optional PointsSelector points_selector = 2; // Affected points optional ShardKeySelector shard_key_selector = 3; // Option for custom sharding to specify used shard keys + optional string key = 4; // Option for indicate property of payload } message DeletePayload { repeated string keys = 1; @@ -623,6 +648,12 @@ message Filter { repeated Condition should = 1; // At least one of those conditions should match repeated Condition must = 2; // All conditions must match repeated Condition must_not = 3; // All conditions must NOT match + optional MinShould min_should = 4; // At least minimum amount of given conditions should match +} + +message MinShould { + repeated Condition conditions = 1; + uint64 min_count = 2; } message Condition { @@ -661,6 +692,7 @@ message FieldCondition { GeoRadius geo_radius = 5; // Check if geo point is within a given radius ValuesCount values_count = 6; // Check number of values for a specific field GeoPolygon geo_polygon = 7; // Check if geo point is within a given polygon + DatetimeRange datetime_range = 8; // Check if datetime is within a given range } message Match { @@ -691,6 +723,13 @@ message Range { optional double lte = 4; } +message DatetimeRange { + optional google.protobuf.Timestamp lt = 1; + optional google.protobuf.Timestamp gt = 2; + optional google.protobuf.Timestamp gte = 3; + optional google.protobuf.Timestamp lte = 4; +} + message GeoBoundingBox { GeoPoint top_left = 1; // north-west corner GeoPoint bottom_right = 2; // south-east corner diff --git a/proto/points_service.proto b/proto/points_service.proto index ddfdc62..9338d07 100644 --- a/proto/points_service.proto +++ b/proto/points_service.proto @@ -8,59 +8,59 @@ option csharp_namespace = "Qdrant.Client.Grpc"; service Points { /* Perform insert + updates on points. If a point with a given ID already exists - it will be overwritten. 
- */ + */ rpc Upsert (UpsertPoints) returns (PointsOperationResponse) {} /* Delete points - */ + */ rpc Delete (DeletePoints) returns (PointsOperationResponse) {} /* Retrieve points - */ + */ rpc Get (GetPoints) returns (GetResponse) {} /* Update named vectors for point - */ + */ rpc UpdateVectors (UpdatePointVectors) returns (PointsOperationResponse) {} /* Delete named vectors for points - */ + */ rpc DeleteVectors (DeletePointVectors) returns (PointsOperationResponse) {} /* Set payload for points - */ + */ rpc SetPayload (SetPayloadPoints) returns (PointsOperationResponse) {} /* Overwrite payload for points - */ + */ rpc OverwritePayload (SetPayloadPoints) returns (PointsOperationResponse) {} /* Delete specified key payload for points - */ + */ rpc DeletePayload (DeletePayloadPoints) returns (PointsOperationResponse) {} /* Remove all payload for specified points - */ + */ rpc ClearPayload (ClearPayloadPoints) returns (PointsOperationResponse) {} /* Create index for field in collection - */ + */ rpc CreateFieldIndex (CreateFieldIndexCollection) returns (PointsOperationResponse) {} /* Delete field index for collection - */ + */ rpc DeleteFieldIndex (DeleteFieldIndexCollection) returns (PointsOperationResponse) {} /* Retrieve closest points based on vector similarity and given filtering conditions - */ + */ rpc Search (SearchPoints) returns (SearchResponse) {} /* - Retrieve closest points based on vector similarity and given filtering conditions - */ + Retrieve closest points based on vector similarity and given filtering conditions + */ rpc SearchBatch (SearchBatchPoints) returns (SearchBatchResponse) {} /* Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given field - */ + */ rpc SearchGroups (SearchPointGroups) returns (SearchGroupsResponse) {} /* Iterate over all or filtered points @@ -68,19 +68,19 @@ service Points { rpc Scroll (ScrollPoints) returns (ScrollResponse) {} /* Look for the points which are closer to stored positive examples and at the same time further to negative examples. - */ + */ rpc Recommend (RecommendPoints) returns (RecommendResponse) {} /* Look for the points which are closer to stored positive examples and at the same time further to negative examples. - */ + */ rpc RecommendBatch (RecommendBatchPoints) returns (RecommendBatchResponse) {} /* Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given field - */ + */ rpc RecommendGroups (RecommendPointGroups) returns (RecommendGroupsResponse) {} /* Use context and a target to find the most similar points to the target, constrained by the context. - + When using only the context (without a target), a special search - called context search - is performed where pairs of points are used to generate a loss that guides the search towards the zone where most positive examples overlap. This means that the score minimizes the scenario of @@ -88,25 +88,25 @@ service Points { Since the score of a context relates to loss, the maximum score a point can get is 0.0, and it becomes normal that many points can have a score of 0.0. - + When using target (with or without context), the score behaves a little different: The integer part of the score represents the rank with respect to the context, while the decimal part of the score relates to the distance to the target. 
The context part of the score for each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair, and -1 otherwise. - */ + */ rpc Discover (DiscoverPoints) returns (DiscoverResponse) {} /* Batch request points based on { positive, negative } pairs of examples, and/or a target - */ + */ rpc DiscoverBatch (DiscoverBatchPoints) returns (DiscoverBatchResponse) {} /* - Count points in collection with given filtering conditions - */ + Count points in collection with given filtering conditions + */ rpc Count (CountPoints) returns (CountResponse) {} /* - Perform multiple update operations in one request + Perform multiple update operations in one request */ rpc UpdateBatch (UpdateBatchPoints) returns (UpdateBatchResponse) {} } diff --git a/proto/qdrant.proto b/proto/qdrant.proto index 4f58399..723bb65 100644 --- a/proto/qdrant.proto +++ b/proto/qdrant.proto @@ -16,4 +16,5 @@ message HealthCheckRequest {} message HealthCheckReply { string title = 1; string version = 2; + optional string commit = 3; } diff --git a/proto/snapshots_service.proto b/proto/snapshots_service.proto index 5127b28..63c9e51 100644 --- a/proto/snapshots_service.proto +++ b/proto/snapshots_service.proto @@ -7,16 +7,16 @@ import "google/protobuf/timestamp.proto"; service Snapshots { /* - Create collection snapshot + Create collection snapshot */ rpc Create (CreateSnapshotRequest) returns (CreateSnapshotResponse) {} /* List collection snapshots - */ + */ rpc List (ListSnapshotsRequest) returns (ListSnapshotsResponse) {} /* Delete collection snapshot - */ + */ rpc Delete (DeleteSnapshotRequest) returns (DeleteSnapshotResponse) {} /* Create full storage snapshot @@ -24,11 +24,11 @@ service Snapshots { rpc CreateFull (CreateFullSnapshotRequest) returns (CreateSnapshotResponse) {} /* List full storage snapshots - */ + */ rpc ListFull (ListFullSnapshotsRequest) returns (ListSnapshotsResponse) {} /* Delete full storage snapshot - */ + */ rpc DeleteFull (DeleteFullSnapshotRequest) returns (DeleteSnapshotResponse) {} } @@ -57,6 +57,7 @@ message SnapshotDescription { string name = 1; // Name of the snapshot google.protobuf.Timestamp creation_time = 2; // Creation time of the snapshot int64 size = 3; // Size of the snapshot in bytes + optional string checksum = 4; // SHA256 digest of the snapshot file } message CreateSnapshotResponse { diff --git a/src/qdrant.rs b/src/qdrant.rs index 9c8d020..bcd3a64 100644 --- a/src/qdrant.rs +++ b/src/qdrant.rs @@ -104,6 +104,27 @@ pub struct GetCollectionInfoRequest { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct CollectionExistsRequest { + #[prost(string, tag = "1")] + pub collection_name: ::prost::alloc::string::String, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CollectionExists { + #[prost(bool, tag = "1")] + pub exists: bool, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct CollectionExistsResponse { + #[prost(message, optional, tag = "1")] + pub result: ::core::option::Option, + /// Time spent to process + #[prost(double, tag = "2")] + pub time: f64, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct ListCollectionsRequest {} #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -157,7 +178,10 @@ pub struct HnswConfigDiff { #[prost(uint64, optional, tag = "3")] pub 
full_scan_threshold: ::core::option::Option, /// - /// Number of parallel threads used for background index building. If 0 - auto selection. + /// Number of parallel threads used for background index building. + /// If 0 - automatically select from 8 to 16. + /// Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs. + /// On small CPUs, less threads are used. #[prost(uint64, optional, tag = "4")] pub max_indexing_threads: ::core::option::Option, /// @@ -251,7 +275,10 @@ pub struct OptimizersConfigDiff { #[prost(uint64, optional, tag = "7")] pub flush_interval_sec: ::core::option::Option, /// - /// Max number of threads, which can be used for optimization. If 0 - `NUM_CPU - 1` will be used + /// Max number of threads (jobs) for running optimizations per shard. + /// Note: each optimization job will also use `max_indexing_threads` threads by itself for index building. + /// If null - have no limit and choose dynamically to saturate CPU. + /// If 0 - no optimization threads, optimizations will be disabled. #[prost(uint64, optional, tag = "8")] pub max_optimization_threads: ::core::option::Option, } @@ -503,8 +530,18 @@ pub struct TextIndexParams { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct IntegerIndexParams { + /// If true - support direct lookups. + #[prost(bool, tag = "1")] + pub lookup: bool, + /// If true - support ranges filters. + #[prost(bool, tag = "2")] + pub range: bool, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct PayloadIndexParams { - #[prost(oneof = "payload_index_params::IndexParams", tags = "1")] + #[prost(oneof = "payload_index_params::IndexParams", tags = "1, 2")] pub index_params: ::core::option::Option, } /// Nested message and enum types in `PayloadIndexParams`. 
@@ -515,6 +552,9 @@ pub mod payload_index_params { /// Parameters for text index #[prost(message, tag = "1")] TextIndexParams(super::TextIndexParams), + /// Parameters for integer index + #[prost(message, tag = "2")] + IntegerIndexParams(super::IntegerIndexParams), } } #[allow(clippy::derive_partial_eq_without_eq)] @@ -752,6 +792,19 @@ pub struct MoveShard { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct RestartTransfer { + /// Local shard id + #[prost(uint32, tag = "1")] + pub shard_id: u32, + #[prost(uint64, tag = "2")] + pub from_peer_id: u64, + #[prost(uint64, tag = "3")] + pub to_peer_id: u64, + #[prost(enumeration = "ShardTransferMethod", tag = "4")] + pub method: i32, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct Replica { #[prost(uint32, tag = "1")] pub shard_id: u32, @@ -792,7 +845,7 @@ pub struct UpdateCollectionClusterSetupRequest { pub timeout: ::core::option::Option, #[prost( oneof = "update_collection_cluster_setup_request::Operation", - tags = "2, 3, 4, 5, 7, 8" + tags = "2, 3, 4, 5, 7, 8, 9" )] pub operation: ::core::option::Option< update_collection_cluster_setup_request::Operation, @@ -815,6 +868,8 @@ pub mod update_collection_cluster_setup_request { CreateShardKey(super::CreateShardKey), #[prost(message, tag = "8")] DeleteShardKey(super::DeleteShardKey), + #[prost(message, tag = "9")] + RestartTransfer(super::RestartTransfer), } } #[allow(clippy::derive_partial_eq_without_eq)] @@ -941,6 +996,7 @@ pub enum PayloadSchemaType { Geo = 4, Text = 5, Bool = 6, + Datetime = 7, } impl PayloadSchemaType { /// String value of the enum field names used in the ProtoBuf definition. @@ -956,6 +1012,7 @@ impl PayloadSchemaType { PayloadSchemaType::Geo => "Geo", PayloadSchemaType::Text => "Text", PayloadSchemaType::Bool => "Bool", + PayloadSchemaType::Datetime => "Datetime", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -968,6 +1025,7 @@ impl PayloadSchemaType { "Geo" => Some(Self::Geo), "Text" => Some(Self::Text), "Bool" => Some(Self::Bool), + "Datetime" => Some(Self::Datetime), _ => None, } } @@ -1111,6 +1169,8 @@ pub enum ReplicaState { Listener = 4, /// Snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard PartialSnapshot = 5, + /// Shard is undergoing recovered by an external node; Normally rejects updates, accepts updates if force is true + Recovery = 6, } impl ReplicaState { /// String value of the enum field names used in the ProtoBuf definition. @@ -1125,6 +1185,7 @@ impl ReplicaState { ReplicaState::Initializing => "Initializing", ReplicaState::Listener => "Listener", ReplicaState::PartialSnapshot => "PartialSnapshot", + ReplicaState::Recovery => "Recovery", } } /// Creates an enum from field names used in the ProtoBuf definition. 
@@ -1136,6 +1197,7 @@ impl ReplicaState { "Initializing" => Some(Self::Initializing), "Listener" => Some(Self::Listener), "PartialSnapshot" => Some(Self::PartialSnapshot), + "Recovery" => Some(Self::Recovery), _ => None, } } @@ -1143,8 +1205,12 @@ impl ReplicaState { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] #[repr(i32)] pub enum ShardTransferMethod { + /// Stream shard records in batches StreamRecords = 0, + /// Snapshot the shard and recover it on the target peer Snapshot = 1, + /// Resolve WAL delta between peers and transfer the difference + WalDelta = 2, } impl ShardTransferMethod { /// String value of the enum field names used in the ProtoBuf definition. @@ -1155,6 +1221,7 @@ impl ShardTransferMethod { match self { ShardTransferMethod::StreamRecords => "StreamRecords", ShardTransferMethod::Snapshot => "Snapshot", + ShardTransferMethod::WalDelta => "WalDelta", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -1162,6 +1229,7 @@ impl ShardTransferMethod { match value { "StreamRecords" => Some(Self::StreamRecords), "Snapshot" => Some(Self::Snapshot), + "WalDelta" => Some(Self::WalDelta), _ => None, } } @@ -1486,6 +1554,33 @@ pub mod collections_client { self.inner.unary(req, path, codec).await } /// + /// Check the existence of a collection + pub async fn collection_exists( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::new( + tonic::Code::Unknown, + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic::codec::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/qdrant.Collections/CollectionExists", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("qdrant.Collections", "CollectionExists")); + self.inner.unary(req, path, codec).await + } + /// /// Update cluster setup for a collection pub async fn update_collection_cluster_setup( &mut self, @@ -1659,6 +1754,15 @@ pub mod collections_server { tonic::Status, >; /// + /// Check the existence of a collection + async fn collection_exists( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// /// Update cluster setup for a collection async fn update_collection_cluster_setup( &self, @@ -2169,6 +2273,52 @@ pub mod collections_server { }; Box::pin(fut) } + "/qdrant.Collections/CollectionExists" => { + #[allow(non_camel_case_types)] + struct CollectionExistsSvc(pub Arc); + impl< + T: Collections, + > tonic::server::UnaryService + for CollectionExistsSvc { + type Response = super::CollectionExistsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + (*inner).collection_exists(request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let inner = inner.0; + let method = CollectionExistsSvc(inner); + let codec = tonic::codec::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + 
.apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } "/qdrant.Collections/UpdateCollectionClusterSetup" => { #[allow(non_camel_case_types)] struct UpdateCollectionClusterSetupSvc(pub Arc); @@ -2646,6 +2796,9 @@ pub struct SetPayloadPoints { /// Option for custom sharding to specify used shard keys #[prost(message, optional, tag = "7")] pub shard_key_selector: ::core::option::Option, + /// Option for indicate property of payload + #[prost(string, optional, tag = "8")] + pub key: ::core::option::Option<::prost::alloc::string::String>, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -2980,6 +3133,40 @@ pub struct SearchPointGroups { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct StartFrom { + #[prost(oneof = "start_from::Value", tags = "1, 2, 3, 4")] + pub value: ::core::option::Option, +} +/// Nested message and enum types in `StartFrom`. +pub mod start_from { + #[allow(clippy::derive_partial_eq_without_eq)] + #[derive(Clone, PartialEq, ::prost::Oneof)] + pub enum Value { + #[prost(double, tag = "1")] + Float(f64), + #[prost(int64, tag = "2")] + Integer(i64), + #[prost(message, tag = "3")] + Timestamp(::prost_types::Timestamp), + #[prost(string, tag = "4")] + Datetime(::prost::alloc::string::String), + } +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct OrderBy { + /// Payload key to order by + #[prost(string, tag = "1")] + pub key: ::prost::alloc::string::String, + /// Ascending or descending order + #[prost(enumeration = "Direction", optional, tag = "2")] + pub direction: ::core::option::Option, + /// Start from this value + #[prost(message, optional, tag = "3")] + pub start_from: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct ScrollPoints { #[prost(string, tag = "1")] pub collection_name: ::prost::alloc::string::String, @@ -3004,6 +3191,9 @@ pub struct ScrollPoints { /// Specify in which shards to look for the points, if not specified - look in all shards #[prost(message, optional, tag = "9")] pub shard_key_selector: ::core::option::Option, + /// Order the records by a payload field + #[prost(message, optional, tag = "10")] + pub order_by: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -3308,6 +3498,9 @@ pub mod points_update_operation { /// Option for custom sharding to specify used shard keys #[prost(message, optional, tag = "3")] pub shard_key_selector: ::core::option::Option, + /// Option for indicate property of payload + #[prost(string, optional, tag = "4")] + pub key: ::core::option::Option<::prost::alloc::string::String>, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -3635,6 +3828,17 @@ pub struct Filter { /// All conditions must NOT match #[prost(message, repeated, tag = "3")] pub must_not: ::prost::alloc::vec::Vec, + /// At least minimum amount of given conditions should match + #[prost(message, optional, tag = "4")] + pub min_should: ::core::option::Option, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] +pub struct MinShould { + 
#[prost(message, repeated, tag = "1")] + pub conditions: ::prost::alloc::vec::Vec, + #[prost(uint64, tag = "2")] + pub min_count: u64, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -3712,6 +3916,9 @@ pub struct FieldCondition { /// Check if geo point is within a given polygon #[prost(message, optional, tag = "7")] pub geo_polygon: ::core::option::Option, + /// Check if datetime is within a given range + #[prost(message, optional, tag = "8")] + pub datetime_range: ::core::option::Option, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -3776,6 +3983,18 @@ pub struct Range { } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] +pub struct DatetimeRange { + #[prost(message, optional, tag = "1")] + pub lt: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "2")] + pub gt: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "3")] + pub gte: ::core::option::Option<::prost_types::Timestamp>, + #[prost(message, optional, tag = "4")] + pub lte: ::core::option::Option<::prost_types::Timestamp>, +} +#[allow(clippy::derive_partial_eq_without_eq)] +#[derive(Clone, PartialEq, ::prost::Message)] pub struct GeoBoundingBox { /// north-west corner #[prost(message, optional, tag = "1")] @@ -3941,6 +4160,7 @@ pub enum FieldType { Geo = 3, Text = 4, Bool = 5, + Datetime = 6, } impl FieldType { /// String value of the enum field names used in the ProtoBuf definition. @@ -3955,6 +4175,7 @@ impl FieldType { FieldType::Geo => "FieldTypeGeo", FieldType::Text => "FieldTypeText", FieldType::Bool => "FieldTypeBool", + FieldType::Datetime => "FieldTypeDatetime", } } /// Creates an enum from field names used in the ProtoBuf definition. @@ -3966,6 +4187,33 @@ impl FieldType { "FieldTypeGeo" => Some(Self::Geo), "FieldTypeText" => Some(Self::Text), "FieldTypeBool" => Some(Self::Bool), + "FieldTypeDatetime" => Some(Self::Datetime), + _ => None, + } + } +} +#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, ::prost::Enumeration)] +#[repr(i32)] +pub enum Direction { + Asc = 0, + Desc = 1, +} +impl Direction { + /// String value of the enum field names used in the ProtoBuf definition. + /// + /// The values are not transformed in any way and thus are considered stable + /// (if the ProtoBuf definition does not change) and safe for programmatic use. + pub fn as_str_name(&self) -> &'static str { + match self { + Direction::Asc => "Asc", + Direction::Desc => "Desc", + } + } + /// Creates an enum from field names used in the ProtoBuf definition. + pub fn from_str_name(value: &str) -> ::core::option::Option { + match value { + "Asc" => Some(Self::Asc), + "Desc" => Some(Self::Desc), _ => None, } } @@ -6032,6 +6280,9 @@ pub struct SnapshotDescription { /// Size of the snapshot in bytes #[prost(int64, tag = "3")] pub size: i64, + /// SHA256 digest of the snapshot file + #[prost(string, optional, tag = "4")] + pub checksum: ::core::option::Option<::prost::alloc::string::String>, } #[allow(clippy::derive_partial_eq_without_eq)] #[derive(Clone, PartialEq, ::prost::Message)] @@ -6753,6 +7004,8 @@ pub struct HealthCheckReply { pub title: ::prost::alloc::string::String, #[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, + #[prost(string, optional, tag = "3")] + pub commit: ::core::option::Option<::prost::alloc::string::String>, } /// Generated client implementations. 
pub mod qdrant_client { diff --git a/tests/integration-tests.sh b/tests/integration-tests.sh index 85aad2a..100e824 100755 --- a/tests/integration-tests.sh +++ b/tests/integration-tests.sh @@ -11,7 +11,7 @@ function stop_docker() # Ensure current path is project root cd "$(dirname "$0")/../" -QDRANT_VERSION='v1.6.0' +QDRANT_VERSION='v1.7.0' QDRANT_HOST='localhost:6333' From e501e2e48266d284689dacae97bd1a601263f4c4 Mon Sep 17 00:00:00 2001 From: Arnaud Gourlay Date: Fri, 1 Mar 2024 14:55:51 +0100 Subject: [PATCH 02/10] fix tests --- src/client.rs | 14 ++++++++++++++ src/lib.rs | 1 + tests/integration-tests.sh | 3 ++- 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 3192f0c..fee354f 100644 --- a/src/client.rs +++ b/src/client.rs @@ -978,6 +978,7 @@ impl QdrantClient { shard_key_selector: Option>, points: &PointsSelector, payload: Payload, + payload_key: Option, ordering: Option, ) -> Result { self._set_payload( @@ -985,6 +986,7 @@ impl QdrantClient { shard_key_selector, points, &payload, + payload_key, false, ordering, ) @@ -997,6 +999,7 @@ impl QdrantClient { shard_key_selector: Option>, points: &PointsSelector, payload: Payload, + payload_key: Option, ordering: Option, ) -> Result { self._set_payload( @@ -1004,6 +1007,7 @@ impl QdrantClient { shard_key_selector, points, &payload, + payload_key, true, ordering, ) @@ -1017,6 +1021,7 @@ impl QdrantClient { shard_key_selector: Option>, points: &PointsSelector, payload: &Payload, + payload_key: Option, block: bool, ordering: Option, ) -> Result { @@ -1025,6 +1030,7 @@ impl QdrantClient { let ordering_ref = ordering.as_ref(); let shard_keys = shard_key_selector.map(ShardKeySelector::from); let shard_keys_ref = &shard_keys; + let payload_key_ref = payload_key.as_ref(); Ok(self .with_points_client(|mut points_api| async move { @@ -1036,6 +1042,7 @@ impl QdrantClient { points_selector: Some(points.clone()), ordering: ordering_ref.cloned(), shard_key_selector: shard_keys_ref.clone(), + key: payload_key_ref.cloned(), }) .await?; Ok(result.into_inner()) @@ -1049,6 +1056,7 @@ impl QdrantClient { shard_key_selector: Option>, points: &PointsSelector, payload: Payload, + payload_key: Option, ordering: Option, ) -> Result { self._overwrite_payload( @@ -1056,6 +1064,7 @@ impl QdrantClient { shard_key_selector, points, &payload, + payload_key, false, ordering, ) @@ -1068,6 +1077,7 @@ impl QdrantClient { shard_key_selector: Option>, points: &PointsSelector, payload: Payload, + payload_key: Option, ordering: Option, ) -> Result { self._overwrite_payload( @@ -1075,6 +1085,7 @@ impl QdrantClient { shard_key_selector, points, &payload, + payload_key, true, ordering, ) @@ -1088,6 +1099,7 @@ impl QdrantClient { shard_key_selector: Option>, points: &PointsSelector, payload: &Payload, + payload_key: Option, block: bool, ordering: Option, ) -> Result { @@ -1096,6 +1108,7 @@ impl QdrantClient { let ordering_ref = ordering.as_ref(); let shard_keys = shard_key_selector.map(ShardKeySelector::from); let shard_keys_ref = &shard_keys; + let payload_key_ref = payload_key.as_ref(); Ok(self .with_points_client(|mut points_api| async move { @@ -1107,6 +1120,7 @@ impl QdrantClient { points_selector: Some(points.clone()), ordering: ordering_ref.cloned(), shard_key_selector: shard_keys_ref.clone(), + key: payload_key_ref.cloned(), }) .await?; Ok(result.into_inner()) diff --git a/src/lib.rs b/src/lib.rs index 3f79e2a..8203a5c 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -550,6 +550,7 @@ mod tests { &vec![0.into()].into(), new_payload, 
None, + None, ) .await?; diff --git a/tests/integration-tests.sh b/tests/integration-tests.sh index 100e824..a2a2f6c 100755 --- a/tests/integration-tests.sh +++ b/tests/integration-tests.sh @@ -11,7 +11,8 @@ function stop_docker() # Ensure current path is project root cd "$(dirname "$0")/../" -QDRANT_VERSION='v1.7.0' +# TODO use v1.8.0 +QDRANT_VERSION='dev' QDRANT_HOST='localhost:6333' From e42a53e62ee1b09b1988c0545c9393c72145354b Mon Sep 17 00:00:00 2001 From: Arnaud Gourlay Date: Fri, 1 Mar 2024 15:00:59 +0100 Subject: [PATCH 03/10] clippy --- src/client.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index fee354f..dd59b67 100644 --- a/src/client.rs +++ b/src/client.rs @@ -266,7 +266,7 @@ impl From> for Vectors { fn from(named_vectors: HashMap) -> Self { Vectors { vectors_options: Some(VectorsOptions::Vectors(NamedVectors { - vectors: named_vectors.into_iter().map(|(k, v)| (k, v)).collect(), + vectors: named_vectors.into_iter().collect(), })), } } @@ -1015,6 +1015,7 @@ impl QdrantClient { } #[inline] + #[allow(clippy::too_many_arguments)] async fn _set_payload( &self, collection_name: impl ToString, @@ -1093,6 +1094,7 @@ impl QdrantClient { } #[inline] + #[allow(clippy::too_many_arguments)] async fn _overwrite_payload( &self, collection_name: impl ToString, From c3c83aba8f4a81d72dd446299f4a6f8450af58b0 Mon Sep 17 00:00:00 2001 From: Arnaud Gourlay Date: Fri, 1 Mar 2024 15:05:02 +0100 Subject: [PATCH 04/10] bump deps --- Cargo.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7d91954..70a8879 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -18,12 +18,12 @@ anyhow = "1" serde = { version = "1", features = ["derive"], optional = true } serde_json = { version = "1", optional = true } -reqwest = { version = "0.11.22", optional = true, default-features = false, features = ["stream", "rustls-tls"] } -futures-util = { version = "0.3.29", optional = true } +reqwest = { version = "0.11.24", optional = true, default-features = false, features = ["stream", "rustls-tls"] } +futures-util = { version = "0.3.30", optional = true } [dev-dependencies] tonic-build = { version = "0.9.2", features = ["prost"] } -tokio = { version = "1.34.0", features = ["rt-multi-thread"] } +tokio = { version = "1.36.0", features = ["rt-multi-thread"] } [features] default = ["download_snapshots", "serde"] From 3a4911cf9416c33ef8219b3add901dc6d50a217a Mon Sep 17 00:00:00 2001 From: Arnaud Gourlay Date: Fri, 1 Mar 2024 16:39:13 +0100 Subject: [PATCH 05/10] add collection_exists --- src/client.rs | 33 ++++++++++++++++++++++++++------- src/lib.rs | 3 +++ 2 files changed, 29 insertions(+), 7 deletions(-) diff --git a/src/client.rs b/src/client.rs index dd59b67..18ab03e 100644 --- a/src/client.rs +++ b/src/client.rs @@ -12,13 +12,14 @@ use crate::qdrant::with_payload_selector::SelectorOptions; use crate::qdrant::{ qdrant_client, shard_key, with_vectors_selector, AliasOperations, ChangeAliases, ClearPayloadPoints, CollectionClusterInfoRequest, CollectionClusterInfoResponse, - CollectionOperationResponse, CollectionParamsDiff, CountPoints, CountResponse, CreateAlias, - CreateCollection, CreateFieldIndexCollection, CreateFullSnapshotRequest, CreateShardKey, - CreateShardKeyRequest, CreateShardKeyResponse, CreateSnapshotRequest, CreateSnapshotResponse, - DeleteAlias, DeleteCollection, DeleteFieldIndexCollection, DeleteFullSnapshotRequest, - DeletePayloadPoints, DeletePointVectors, DeletePoints, DeleteShardKey, 
DeleteShardKeyRequest, - DeleteShardKeyResponse, DeleteSnapshotRequest, DeleteSnapshotResponse, DiscoverBatchPoints, - DiscoverBatchResponse, DiscoverPoints, DiscoverResponse, FieldType, GetCollectionInfoRequest, + CollectionExistsRequest, CollectionOperationResponse, CollectionParamsDiff, CountPoints, + CountResponse, CreateAlias, CreateCollection, CreateFieldIndexCollection, + CreateFullSnapshotRequest, CreateShardKey, CreateShardKeyRequest, CreateShardKeyResponse, + CreateSnapshotRequest, CreateSnapshotResponse, DeleteAlias, DeleteCollection, + DeleteFieldIndexCollection, DeleteFullSnapshotRequest, DeletePayloadPoints, DeletePointVectors, + DeletePoints, DeleteShardKey, DeleteShardKeyRequest, DeleteShardKeyResponse, + DeleteSnapshotRequest, DeleteSnapshotResponse, DiscoverBatchPoints, DiscoverBatchResponse, + DiscoverPoints, DiscoverResponse, FieldType, GetCollectionInfoRequest, GetCollectionInfoResponse, GetPoints, GetResponse, HealthCheckReply, HealthCheckRequest, HnswConfigDiff, ListAliasesRequest, ListAliasesResponse, ListCollectionAliasesRequest, ListCollectionsRequest, ListCollectionsResponse, ListFullSnapshotsRequest, @@ -484,6 +485,7 @@ impl QdrantClient { .await?) } + #[deprecated(since = "1.8.0", note = "Please use the `collection_exists` instead")] pub async fn has_collection(&self, collection_name: impl ToString) -> Result { let collection_name = collection_name.to_string(); let response = self.list_collections().await?; @@ -495,6 +497,23 @@ impl QdrantClient { Ok(result) } + pub async fn collection_exists(&self, collection_name: impl ToString) -> Result { + let collection_name_ref = &collection_name.to_string(); + Ok(self + .with_collections_client(|mut collection_api| async move { + let request = CollectionExistsRequest { + collection_name: collection_name_ref.clone(), + }; + let result = collection_api.collection_exists(request).await?; + Ok(result + .into_inner() + .result + .map(|r| r.exists) + .unwrap_or(false)) + }) + .await?) 
+ } + pub async fn create_collection( &self, details: &CreateCollection, diff --git a/src/lib.rs b/src/lib.rs index 8203a5c..2531988 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -487,6 +487,9 @@ mod tests { }) .await?; + let exists = client.collection_exists(collection_name).await?; + assert!(exists); + let collection_info = client.collection_info(collection_name).await?; println!("{:#?}", collection_info); From 412ed7b495729d9041f8ee29520d083b78e286f4 Mon Sep 17 00:00:00 2001 From: Albert Safin Date: Tue, 5 Mar 2024 02:07:32 +0000 Subject: [PATCH 06/10] Add Condition::datetime_range, re-export prost_types::Timestamp --- src/filters.rs | 24 +++++++++++++++++++++++- src/lib.rs | 3 +++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/src/filters.rs b/src/filters.rs index 2812944..311734e 100644 --- a/src/filters.rs +++ b/src/filters.rs @@ -1,7 +1,7 @@ -use crate::qdrant; use crate::qdrant::condition::ConditionOneOf; use crate::qdrant::points_selector::PointsSelectorOneOf; use crate::qdrant::r#match::MatchValue; +use crate::qdrant::{self, DatetimeRange}; use crate::qdrant::{ Condition, FieldCondition, Filter, GeoBoundingBox, GeoPolygon, GeoRadius, HasIdCondition, IsEmptyCondition, IsNullCondition, NestedCondition, PointId, PointsSelector, Range, @@ -225,6 +225,28 @@ impl qdrant::Condition { } } + /// create a Condition that checks datetime fields against a range + /// + /// # Examples: + /// + /// ``` + /// use qdrant_client::qdrant::DatetimeRange; + /// use qdrant_client::Timestamp; + /// qdrant_client::qdrant::Condition::datetime_range("timestamp", DatetimeRange { + /// gte: Some(Timestamp::date(2023, 2, 8).unwrap()), + /// ..Default::default() + /// }); + /// ``` + pub fn datetime_range(field: impl Into, range: DatetimeRange) -> Self { + Self { + condition_one_of: Some(ConditionOneOf::Field(qdrant::FieldCondition { + key: field.into(), + datetime_range: Some(range), + ..Default::default() + })), + } + } + /// create a Condition that checks geo fields against a radius /// /// # Examples: diff --git a/src/lib.rs b/src/lib.rs index 2531988..99d2adb 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -119,6 +119,9 @@ use std::error::Error; use std::fmt::{Debug, Display, Formatter}; use std::hash::{Hash, Hasher}; +#[doc(no_inline)] +pub use prost_types::Timestamp; + static NULL_VALUE: Value = Value { kind: Some(NullValue(0)), }; From 076e05b4dbc62f53829ea06f9cf4438118f615a8 Mon Sep 17 00:00:00 2001 From: Arnaud Gourlay Date: Tue, 5 Mar 2024 09:31:24 +0100 Subject: [PATCH 07/10] syntax --- src/client.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client.rs b/src/client.rs index 18ab03e..c05ea6e 100644 --- a/src/client.rs +++ b/src/client.rs @@ -485,7 +485,7 @@ impl QdrantClient { .await?) 
} - #[deprecated(since = "1.8.0", note = "Please use the `collection_exists` instead")] + #[deprecated(since = "1.8.0", note = "Please use `collection_exists` instead")] pub async fn has_collection(&self, collection_name: impl ToString) -> Result { let collection_name = collection_name.to_string(); let response = self.list_collections().await?; From 452e4bf784a1a793d102e2424b592ef62967f5d6 Mon Sep 17 00:00:00 2001 From: Arnaud Gourlay Date: Tue, 5 Mar 2024 15:38:48 +0100 Subject: [PATCH 08/10] syntax for min_should --- src/filters.rs | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/src/filters.rs b/src/filters.rs index 311734e..214abdc 100644 --- a/src/filters.rs +++ b/src/filters.rs @@ -1,7 +1,7 @@ use crate::qdrant::condition::ConditionOneOf; use crate::qdrant::points_selector::PointsSelectorOneOf; use crate::qdrant::r#match::MatchValue; -use crate::qdrant::{self, DatetimeRange}; +use crate::qdrant::{self, DatetimeRange, MinShould}; use crate::qdrant::{ Condition, FieldCondition, Filter, GeoBoundingBox, GeoPolygon, GeoRadius, HasIdCondition, IsEmptyCondition, IsNullCondition, NestedCondition, PointId, PointsSelector, Range, @@ -91,7 +91,7 @@ impl qdrant::Filter { }) } - /// create a Filter where all of the conditions must be satisfied + /// create a Filter where all the conditions must be satisfied pub fn must(conds: impl IntoIterator) -> Self { Self { must: conds.into_iter().collect(), @@ -107,6 +107,17 @@ impl qdrant::Filter { } } + /// create a Filter where at least a minimum amount of given conditions should be statisfied + pub fn min_should(min_count: u64, conds: impl IntoIterator) -> Self { + Self { + min_should: Some(MinShould { + min_count, + conditions: conds.into_iter().collect(), + }), + ..Default::default() + } + } + /// create a Filter where none of the conditions must be satisfied pub fn must_not(conds: impl IntoIterator) -> Self { Self { From af0e0a64418829627574d7e77d8f86aff307704b Mon Sep 17 00:00:00 2001 From: Arnaud Gourlay Date: Tue, 5 Mar 2024 16:05:05 +0100 Subject: [PATCH 09/10] bump 1.8.0 --- Cargo.toml | 2 +- tests/integration-tests.sh | 3 +-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 70a8879..38f0173 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "qdrant-client" -version = "1.7.0" +version = "1.8.0" edition = "2021" authors = ["Qdrant Team "] description = "Rust client for Qdrant Vector Search Engine" diff --git a/tests/integration-tests.sh b/tests/integration-tests.sh index a2a2f6c..40be3a6 100755 --- a/tests/integration-tests.sh +++ b/tests/integration-tests.sh @@ -11,8 +11,7 @@ function stop_docker() # Ensure current path is project root cd "$(dirname "$0")/../" -# TODO use v1.8.0 -QDRANT_VERSION='dev' +QDRANT_VERSION='v1.8.0' QDRANT_HOST='localhost:6333' From e046a3c4ba4b05d90d5fd21c13a3d4c9e056fafb Mon Sep 17 00:00:00 2001 From: Arnaud Gourlay Date: Wed, 6 Mar 2024 14:16:22 +0100 Subject: [PATCH 10/10] revert crate version --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 38f0173..bbbe42e 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "qdrant-client" -version = "1.8.0" +version = "1.6.0" edition = "2021" authors = ["Qdrant Team "] description = "Rust client for Qdrant Vector Search Engine"
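
As a hedged usage sketch of the client-side features this patch series introduces (the collection_exists RPC, DatetimeRange conditions, and min_should filters), the snippet below shows how they might be called together; the tokio "macros" feature, anyhow, the localhost URL, and the collection/field names are illustrative assumptions, not part of the patches themselves.

use qdrant_client::client::QdrantClient;
use qdrant_client::qdrant::{Condition, DatetimeRange, Filter};
use qdrant_client::Timestamp;

#[tokio::main]
async fn main() -> anyhow::Result<()> {
    // Assumed endpoint; adjust to your deployment (6334 is Qdrant's default gRPC port).
    let client = QdrantClient::from_url("http://localhost:6334").build()?;

    // New dedicated existence check (the older has_collection helper is now deprecated).
    let exists = client.collection_exists("my_collection").await?;
    println!("collection exists: {exists}");

    // New filter helpers: a datetime range condition wrapped in a min_should clause,
    // which requires at least `min_count` of the listed conditions to match.
    let filter = Filter::min_should(
        1,
        [Condition::datetime_range(
            "created_at",
            DatetimeRange {
                gte: Some(Timestamp::date(2024, 1, 1).unwrap()),
                ..Default::default()
            },
        )],
    );
    let _ = filter; // pass this filter to search/scroll requests as usual

    Ok(())
}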