enhance: knowhere support data view index node
Signed-off-by: cqy123456 <[email protected]>
cqy123456 committed Jan 8, 2025
1 parent 9a6a8df commit d9dc4cb
Showing 20 changed files with 1,822 additions and 101 deletions.
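At a glance, the commit: registers a new SCANN_WITH_DV_REFINER index name and a SUB_DIM parameter; relaxes IndexIterator so subclasses no longer have to implement next_batch; lets the index Pack carry a ViewDataOp callback for reading caller-owned vectors; adds a DataFormatEnum plus a datatype_v trait over knowhere's element types; and extends the dataset utilities with L2-norm helpers, a copy-based normalize that preserves the source tensor, and dimension padding in data_type_conversion.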
3 changes: 2 additions & 1 deletion include/knowhere/comp/index_param.h
@@ -30,6 +30,7 @@ constexpr const char* INDEX_FAISS_IVFFLAT = "IVF_FLAT";
constexpr const char* INDEX_FAISS_IVFFLAT_CC = "IVF_FLAT_CC";
constexpr const char* INDEX_FAISS_IVFPQ = "IVF_PQ";
constexpr const char* INDEX_FAISS_SCANN = "SCANN";
+constexpr const char* INDEX_FAISS_SCANN_WITH_DV_REFINER = "SCANN_WITH_DV_REFINER";
constexpr const char* INDEX_FAISS_IVFSQ8 = "IVF_SQ8";
constexpr const char* INDEX_FAISS_IVFSQ_CC = "IVF_SQ_CC";

Expand Down Expand Up @@ -118,7 +119,7 @@ constexpr const char* WITH_RAW_DATA = "with_raw_data";
constexpr const char* ENSURE_TOPK_FULL = "ensure_topk_full";
constexpr const char* CODE_SIZE = "code_size";
constexpr const char* RAW_DATA_STORE_PREFIX = "raw_data_store_prefix";
-
+constexpr const char* SUB_DIM = "sub_dim";
// RAFT Params
constexpr const char* REFINE_RATIO = "refine_ratio";
constexpr const char* CACHE_DATASET_ON_DEVICE = "cache_dataset_on_device";
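A hedged sketch of how the two new constants might be consumed when building the index; the remaining keys and values are placeholders, not shown by this hunk, and the exact config accepted by the new index type is defined elsewhere in this PR:

// Hypothetical build config using the new constants (values are placeholders).
knowhere::Json build_conf;
build_conf["metric_type"] = "COSINE";
build_conf["nlist"] = 1024;   // usual SCANN/IVF partitioning knob
build_conf["sub_dim"] = 2;    // the new SUB_DIM parameter added above
// "SCANN_WITH_DV_REFINER" is the new index name registered above; an index
// would be created under that name through the usual IndexFactory path.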
22 changes: 12 additions & 10 deletions include/knowhere/index/index_node.h
@@ -524,8 +524,16 @@ class IndexIterator : public IndexNode::iterator {
}

protected:
+    inline size_t
+    min_refine_size() const {
+        // TODO: maybe make this configurable
+        return std::max((size_t)20, (size_t)(res_.size() * refine_ratio_));
+    }
+
virtual void
-    next_batch(std::function<void(const std::vector<DistId>&)> batch_handler) = 0;
+    next_batch(std::function<void(const std::vector<DistId>&)> batch_handler) {
+        throw std::runtime_error("next_batch not implemented");
+    }
// will be called only if refine_ratio_ is not 0.
virtual float
raw_distance(int64_t) {
@@ -537,18 +545,15 @@ class IndexIterator : public IndexNode::iterator {

const float refine_ratio_;
const bool refine_;
+    bool initialized_ = false;
+    bool retain_iterator_order_ = false;
+    const int64_t sign_;

std::priority_queue<DistId, std::vector<DistId>, std::greater<DistId>> res_;
// unused if refine_ is false
std::priority_queue<DistId, std::vector<DistId>, std::greater<DistId>> refined_res_;

private:
-    inline size_t
-    min_refine_size() const {
-        // TODO: maybe make this configurable
-        return std::max((size_t)20, (size_t)(res_.size() * refine_ratio_));
-    }
-
void
UpdateNext() {
auto batch_handler = [this](const std::vector<DistId>& batch) {
        // ... (intervening lines hidden in the diff view) ...
next_batch(batch_handler);
}

-    bool initialized_ = false;
-    bool retain_iterator_order_ = false;
    bool use_knowhere_search_pool_ = true;
-    const int64_t sign_;
};

// An iterator implementation that accepts a function to get distances and ids list and returns them in order.
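Two things change here: next_batch drops from pure virtual to a throwing default, and initialized_, retain_iterator_order_, sign_ and min_refine_size() move from the private section up to protected. Both let a subclass (presumably the data view refiner's iterator) drive iteration itself without implementing the batch hook. A self-contained sketch of the pattern with simplified names, not the real class:

#include <functional>
#include <stdexcept>
#include <vector>

struct DistId {
    int64_t id;
    float dist;
};

class IteratorBase {
 protected:
    // Before: `virtual void next_batch(...) = 0;` forced every subclass to
    // implement batching. With a throwing default, only subclasses that rely
    // on the base class's UpdateNext() machinery must override it.
    virtual void
    next_batch(std::function<void(const std::vector<DistId>&)> batch_handler) {
        throw std::runtime_error("next_batch not implemented");
    }
};

// A subclass that advances some other way simply never calls into next_batch.
class PrecomputedIterator : public IteratorBase {
    // ... yields results from a precomputed list instead ...
};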
7 changes: 5 additions & 2 deletions include/knowhere/object.h
@@ -14,6 +14,7 @@

#include <atomic>
#include <cassert>
+#include <functional>
#include <iostream>
#include <memory>

@@ -73,11 +74,13 @@ class Object {
mutable std::atomic_uint32_t ref_counts_ = 1;
};

+using ViewDataOp = std::function<const void*(size_t)>;

template <typename T>
class Pack : public Object {
-    static_assert(std::is_same_v<T, std::shared_ptr<knowhere::FileManager>>,
+    static_assert(std::is_same_v<T, std::shared_ptr<knowhere::FileManager>> || std::is_same_v<T, knowhere::ViewDataOp>,
                   "IndexPack only support std::shared_ptr<knowhere::FileManager> by far.");

// todo: pack can hold more object
public:
Pack() {
}
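ViewDataOp maps an offset to a pointer into vector data the caller owns, and the widened static_assert lets that callback ride into an index through the existing Pack mechanism, the same way a FileManager does. A minimal sketch, assuming the op receives a row id and returns that row's raw vector (the hunk itself does not pin down the offset semantics):

#include <cstddef>
#include <vector>
#include "knowhere/object.h"

// Caller-owned vectors; the index only keeps a view, never a copy, so this
// storage must outlive the index.
knowhere::ViewDataOp
MakeViewDataOp(const std::vector<float>& raw, size_t dim) {
    // Assumption: `offset` is a row id and the consumer reads `dim` floats back.
    return [&raw, dim](size_t offset) -> const void* {
        return raw.data() + offset * dim;
    };
}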
31 changes: 31 additions & 0 deletions include/knowhere/operands.h
@@ -196,5 +196,36 @@ template <>
struct MockData<knowhere::int8> {
using type = knowhere::fp32;
};
+
+//
+enum class DataFormatEnum { fp32, fp16, bf16, int8, bin1 };
+
+template <typename T>
+struct DataType2EnumHelper {};
+
+template <>
+struct DataType2EnumHelper<knowhere::fp32> {
+    static constexpr DataFormatEnum value = DataFormatEnum::fp32;
+};
+template <>
+struct DataType2EnumHelper<knowhere::fp16> {
+    static constexpr DataFormatEnum value = DataFormatEnum::fp16;
+};
+template <>
+struct DataType2EnumHelper<knowhere::bf16> {
+    static constexpr DataFormatEnum value = DataFormatEnum::bf16;
+};
+template <>
+struct DataType2EnumHelper<knowhere::int8> {
+    static constexpr DataFormatEnum value = DataFormatEnum::int8;
+};
+template <>
+struct DataType2EnumHelper<knowhere::bin1> {
+    static constexpr DataFormatEnum value = DataFormatEnum::bin1;
+};
+
+template <typename T>
+static constexpr DataFormatEnum datatype_v = DataType2EnumHelper<T>::value;
+
} // namespace knowhere
#endif /* OPERANDS_H */
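datatype_v turns a compile-time element type into a comparable tag, which saves templated index code from repeating is_same_v chains. A small usage sketch, not code from this commit:

#include "knowhere/operands.h"

// Pick a per-element byte width from the new trait; bin1 packs 8 values per
// byte, so real code would treat it separately.
template <typename DataType>
constexpr size_t
element_bytes() {
    constexpr auto fmt = knowhere::datatype_v<DataType>;
    if constexpr (fmt == knowhere::DataFormatEnum::fp32) {
        return 4;
    } else if constexpr (fmt == knowhere::DataFormatEnum::fp16 || fmt == knowhere::DataFormatEnum::bf16) {
        return 2;
    } else {
        return 1;  // int8; bin1 is 1 bit per dimension, see caveat above
    }
}

static_assert(knowhere::datatype_v<knowhere::fp32> == knowhere::DataFormatEnum::fp32);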
45 changes: 33 additions & 12 deletions include/knowhere/utils.h
@@ -36,6 +36,14 @@ IsFlatIndex(const knowhere::IndexType& index_type) {
return std::find(flat_index_list.begin(), flat_index_list.end(), index_type) != flat_index_list.end();
}

+template <typename DataType>
+float
+GetL2Norm(const DataType* x, int32_t d);
+
+template <typename DataType>
+std::vector<float>
+GetL2Norms(const DataType* x, int32_t d, int32_t n);
+
template <typename DataType>
extern float
NormalizeVec(DataType* x, int32_t d);
@@ -52,6 +60,10 @@ template <typename DataType>
extern void
NormalizeDataset(const DataSetPtr dataset);

+template <typename DataType>
+extern std::tuple<DataSetPtr, std::vector<float>>
+CopyAndNormalizeDataset(const DataSetPtr dataset);
+
constexpr inline uint64_t seed = 0xc70f6907UL;

inline uint64_t
@@ -112,8 +124,10 @@ GetKey(const std::string& name) {
template <typename InType, typename OutType>
inline DataSetPtr
data_type_conversion(const DataSet& src, const std::optional<int64_t> start = std::nullopt,
-                     const std::optional<int64_t> count = std::nullopt) {
-    auto dim = src.GetDim();
+                     const std::optional<int64_t> count = std::nullopt,
+                     const std::optional<int64_t> filling_dim = std::nullopt) {
+    auto in_dim = src.GetDim();
+    auto out_dim = filling_dim.value_or(in_dim);
auto rows = src.GetRows();

// check the acceptable range
@@ -128,15 +142,18 @@ data_type_conversion(const DataSet& src, const std::optional<int64_t> start = std::nullopt,
}

// map
-    auto* des_data = new OutType[dim * count_rows];
+    auto* des_data = new OutType[out_dim * count_rows];
+    std::memset(des_data, 0, sizeof(OutType) * out_dim * count_rows);
auto* src_data = (const InType*)src.GetTensor();
-    for (auto i = 0; i < dim * count_rows; i++) {
-        des_data[i] = (OutType)src_data[i + start_row * dim];
+    for (auto i = 0; i < count_rows; i++) {
+        for (auto d = 0; d < in_dim; d++) {
+            des_data[i * out_dim + d] = (OutType)src_data[(start_row + i) * in_dim + d];
+        }
}

auto des = std::make_shared<DataSet>();
des->SetRows(count_rows);
-    des->SetDim(dim);
+    des->SetDim(out_dim);
des->SetTensor(des_data);
des->SetIsOwner(true);
return des;
@@ -152,28 +169,32 @@ data_type_conversion(const DataSet& src, const std::optional<int64_t> start = std::nullopt,
template <typename DataType>
inline DataSetPtr
ConvertFromDataTypeIfNeeded(const DataSetPtr& ds, const std::optional<int64_t> start = std::nullopt,
-                            const std::optional<int64_t> count = std::nullopt) {
+                            const std::optional<int64_t> count = std::nullopt,
+                            const std::optional<int64_t> filling_dim = std::nullopt) {
if constexpr (std::is_same_v<DataType, typename MockData<DataType>::type>) {
-        if (!start.has_value() && !count.has_value()) {
+        if (!start.has_value() && !count.has_value() &&
+            (!filling_dim.has_value() || ds->GetDim() == filling_dim.value())) {
return ds;
}
}

-    return data_type_conversion<DataType, typename MockData<DataType>::type>(*ds, start, count);
+    return data_type_conversion<DataType, typename MockData<DataType>::type>(*ds, start, count, filling_dim);
}

// Convert DataSet from float to DataType
template <typename DataType>
inline DataSetPtr
ConvertToDataTypeIfNeeded(const DataSetPtr& ds, const std::optional<int64_t> start = std::nullopt,
-                          const std::optional<int64_t> count = std::nullopt) {
+                          const std::optional<int64_t> count = std::nullopt,
+                          const std::optional<int64_t> filling_dim = std::nullopt) {
if constexpr (std::is_same_v<DataType, typename MockData<DataType>::type>) {
-        if (!start.has_value() && !count.has_value()) {
+        if (!start.has_value() && !count.has_value() &&
+            (!filling_dim.has_value() || ds->GetDim() == filling_dim.value())) {
return ds;
}
}

-    return data_type_conversion<typename MockData<DataType>::type, DataType>(*ds, start, count);
+    return data_type_conversion<typename MockData<DataType>::type, DataType>(*ds, start, count, filling_dim);
}

template <typename T>
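The new filling_dim argument turns data_type_conversion from a flat element-by-element copy into a row-wise copy: the destination is allocated at out_dim per row, zeroed with memset, and each in_dim-wide source row is copied to the head of its out_dim-wide slot, so the trailing entries stay zero. An equivalent standalone illustration of that layout (the motive is an assumption: rounding a dataset's width up, e.g. to a multiple of the new SUB_DIM, which this hunk does not state):

#include <cstring>
#include <vector>

// Zero-pad each row from in_dim to out_dim, mirroring the new copy loop in
// data_type_conversion (same layout, fixed to float for brevity).
std::vector<float>
PadRows(const float* src, int64_t rows, int64_t in_dim, int64_t out_dim) {
    std::vector<float> dst(rows * out_dim, 0.0f);
    for (int64_t i = 0; i < rows; i++) {
        std::memcpy(dst.data() + i * out_dim, src + i * in_dim, sizeof(float) * in_dim);
    }
    return dst;
}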
71 changes: 67 additions & 4 deletions src/common/utils.cc
@@ -25,11 +25,38 @@
namespace knowhere {

const float FloatAccuracy = 0.00001;
+template <typename DataType>
+float
+GetL2Norm(const DataType* x, int32_t d) {
+    float norm_l2_sqr = 0.0;
+    if constexpr (std::is_same_v<DataType, fp32>) {
+        norm_l2_sqr = faiss::fvec_norm_L2sqr(x, d);
+    } else if constexpr (std::is_same_v<DataType, fp16>) {
+        norm_l2_sqr = faiss::fp16_vec_norm_L2sqr(x, d);
+    } else if constexpr (std::is_same_v<DataType, bf16>) {
+        norm_l2_sqr = faiss::bf16_vec_norm_L2sqr(x, d);
+    } else {
+        KNOWHERE_THROW_MSG("Unknown Datatype");
+    }
+
+    if (norm_l2_sqr > 0 && std::abs(1.0f - norm_l2_sqr) > FloatAccuracy) {
+        float norm_l2 = std::sqrt(norm_l2_sqr);
+        return norm_l2;
+    }
+    return 1.0f;
+}
+
+template <typename DataType>
+std::vector<float>
+GetL2Norms(const DataType* x, int32_t d, int32_t n) {
+    std::vector<float> norms(n);
+    for (auto i = 0; i < n; i++) {
+        auto x_i = x + d * i;
+        norms[i] = GetL2Norm(x_i, d);
+    }
+    return norms;
+}
// normalize one vector and return its norm
// todo(cqy123456): Template specialization for fp16/bf16;
// float16 uses the smallest representable positive float16 value(6.1 x 10^(-5)) as FloatAccuracy;
// bfloat16 uses the same FloatAccuracy as float32;
template <typename DataType>
float
NormalizeVec(DataType* x, int32_t d) {
@@ -83,10 +110,26 @@ NormalizeDataset(const DataSetPtr dataset) {
auto data = (DataType*)dataset->GetTensor();

LOG_KNOWHERE_DEBUG_ << "vector normalize, rows " << rows << ", dim " << dim;

NormalizeVecs<DataType>(data, rows, dim);
}

+template <typename DataType>
+std::tuple<DataSetPtr, std::vector<float>>
+CopyAndNormalizeDataset(const DataSetPtr dataset) {
+    auto rows = dataset->GetRows();
+    auto dim = dataset->GetDim();
+    auto data = (DataType*)dataset->GetTensor();
+
+    LOG_KNOWHERE_DEBUG_ << "vector normalize, rows " << rows << ", dim " << dim;
+
+    auto x_normalized = new DataType[rows * dim];
+    std::copy_n(data, rows * dim, x_normalized);
+    auto norms = NormalizeVecs<DataType>(x_normalized, rows, dim);
+    auto normalize_bs = GenDataSet(rows, dim, x_normalized);
+    normalize_bs->SetIsOwner(true);
+    return std::make_tuple(normalize_bs, norms);
+}

void
ConvertIVFFlat(const BinarySet& binset, const MetricType metric_type, const uint8_t* raw_data, const size_t raw_size) {
std::vector<std::string> names = {"IVF", // compatible with knowhere-1.x
@@ -135,6 +178,20 @@ UseDiskLoad(const std::string& index_type, const int32_t& version) {
#endif
}

+template float
+GetL2Norm<fp32>(const fp32* x, int32_t d);
+template float
+GetL2Norm<fp16>(const fp16* x, int32_t d);
+template float
+GetL2Norm<bf16>(const bf16* x, int32_t d);
+
+template std::vector<float>
+GetL2Norms<fp32>(const fp32* x, int32_t d, int32_t n);
+template std::vector<float>
+GetL2Norms<fp16>(const fp16* x, int32_t d, int32_t n);
+template std::vector<float>
+GetL2Norms<bf16>(const bf16* x, int32_t d, int32_t n);

template float
NormalizeVec<fp32>(fp32* x, int32_t d);
template float
@@ -163,4 +220,10 @@ NormalizeDataset<fp16>(const DataSetPtr dataset);
template void
NormalizeDataset<bf16>(const DataSetPtr dataset);

+template std::tuple<DataSetPtr, std::vector<float>>
+CopyAndNormalizeDataset<fp32>(const DataSetPtr dataset);
+template std::tuple<DataSetPtr, std::vector<float>>
+CopyAndNormalizeDataset<fp16>(const DataSetPtr dataset);
+template std::tuple<DataSetPtr, std::vector<float>>
+CopyAndNormalizeDataset<bf16>(const DataSetPtr dataset);
} // namespace knowhere
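Unlike NormalizeDataset, which rewrites the caller's tensor in place, CopyAndNormalizeDataset normalizes a copy and hands back the per-row norms alongside it. A short usage sketch; the comment about recovering raw scores is an assumption about how the returned norms are meant to be used:

#include "knowhere/utils.h"

void
BuildFromNormalizedCopy(const knowhere::DataSetPtr& base) {
    // `base` keeps its original tensor; `normalized` owns the copy.
    auto [normalized, norms] = knowhere::CopyAndNormalizeDataset<knowhere::fp32>(base);
    // With x_hat = x / norm(x), an inner product over normalized vectors can
    // be scaled back: ip(x, y) = ip(x_hat, y_hat) * norm(x) * norm(y).
    (void)normalized;
    (void)norms;
}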