From 081b29bd2a3d91e7772e3910ce223dd63b8d7d26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Tue, 17 Dec 2024 19:09:35 +0100 Subject: [PATCH] tests: add tests for GGUF (#10830) --- ggml/src/ggml-impl.h | 16 + ggml/src/ggml.c | 65 +-- tests/CMakeLists.txt | 1 + tests/test-gguf.cpp | 1303 ++++++++++++++++++++++++++++++++++++++++++ 4 files changed, 1344 insertions(+), 41 deletions(-) create mode 100644 tests/test-gguf.cpp diff --git a/ggml/src/ggml-impl.h b/ggml/src/ggml-impl.h index f961134edd735..549772c57c90a 100644 --- a/ggml/src/ggml-impl.h +++ b/ggml/src/ggml-impl.h @@ -551,6 +551,22 @@ static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) { #define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x) #define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x) +// expose GGUF internals for test code + +GGML_API size_t gguf_type_size(enum gguf_type type); + +GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params); + +struct gguf_buf { + void * data; + size_t size; + size_t offset; +}; +GGML_API struct gguf_buf gguf_buf_init(size_t size); +GGML_API void gguf_buf_free(struct gguf_buf buf); + +GGML_API void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta); + #ifdef __cplusplus } #endif diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 030d93a5177c0..0efd2b2ebf780 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -6489,7 +6489,7 @@ struct gguf_context { void * data; }; -static size_t gguf_type_size(enum gguf_type type) { +size_t gguf_type_size(enum gguf_type type) { GGML_ASSERT(0 <= type && type < GGUF_TYPE_COUNT); return GGUF_TYPE_SIZE[type]; } @@ -6617,13 +6617,7 @@ struct gguf_context * gguf_init_empty(void) { return ctx; } -struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) { - FILE * file = ggml_fopen(fname, "rb"); - if (!file) { - fprintf(stderr, "%s: failed to open '%s': '%s'\n", __func__, fname, strerror(errno)); - return NULL; - } - +struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params) { // offset from start of file size_t offset = 0; @@ -6636,7 +6630,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p for (uint32_t i = 0; i < sizeof(magic); i++) { if (magic[i] != GGUF_MAGIC[i]) { fprintf(stderr, "%s: invalid magic characters '%c%c%c%c'\n", __func__, magic[0], magic[1], magic[2], magic[3]); - fclose(file); return NULL; } } @@ -6647,7 +6640,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p struct gguf_context * ctx = calloc(1, sizeof(struct gguf_context)); if (!ctx) { fprintf(stderr, "%s: failed to allocate memory for context\n", __func__); - fclose(file); return NULL; } @@ -6665,7 +6657,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (ctx->header.version == 1) { fprintf(stderr, "%s: GGUFv1 is no longer supported. 
please use a more up-to-date version\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6678,7 +6669,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read header\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6688,12 +6678,13 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p { const uint64_t n_kv = ctx->header.n_kv; - ctx->kv = calloc(n_kv, sizeof(struct gguf_kv)); - if (!ctx->kv) { - fprintf(stderr, "%s: failed to allocate memory for kv pairs\n", __func__); - fclose(file); - gguf_free(ctx); - return NULL; + if (n_kv > 0) { + ctx->kv = calloc(n_kv, sizeof(struct gguf_kv)); + if (!ctx->kv) { + fprintf(stderr, "%s: failed to allocate memory for kv pairs\n", __func__); + gguf_free(ctx); + return NULL; + } } for (uint64_t i = 0; i < n_kv; ++i) { @@ -6740,7 +6731,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // prevent from integer overflow in the malloc below if (kv->value.arr.n >= SIZE_MAX/gguf_type_size(kv->value.arr.type)) { fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n); - fclose(file); gguf_free(ctx); return NULL; } @@ -6748,7 +6738,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p kv->value.arr.data = calloc(kv->value.arr.n, gguf_type_size(kv->value.arr.type)); if (!kv->value.arr.data) { fprintf(stderr, "%s: failed to allocate memory for array\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6760,7 +6749,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // prevent from integer overflow in the malloc below if (kv->value.arr.n >= SIZE_MAX/sizeof(struct gguf_str)) { fprintf(stderr, "%s: array size is too large (%" PRIu64 ")\n", __func__, kv->value.arr.n); - fclose(file); gguf_free(ctx); return NULL; } @@ -6768,7 +6756,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p kv->value.arr.data = calloc(kv->value.arr.n, sizeof(struct gguf_str)); if (!kv->value.arr.data) { fprintf(stderr, "%s: failed to allocate memory for array\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6799,7 +6786,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read key-value pairs\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6810,7 +6796,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p ctx->infos = calloc(ctx->header.n_tensors, sizeof(struct gguf_tensor_info)); if (!ctx->infos) { fprintf(stderr, "%s: failed to allocate memory for tensor infos\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6846,7 +6831,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read tensor info\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6889,7 +6873,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p // this tensor type support have been removed: fprintf(stderr, "%s: tensor '%s' of type %d: %s\n", __func__, info->name.data, (int) info->type, ggml_type_name(info->type)); - fclose(file); gguf_free(ctx); return NULL; } @@ -6897,7 +6880,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (ne % ggml_blck_size(info->type) != 0) { fprintf(stderr, "%s: tensor '%s' of type 
%d (%s) number of elements (%" PRId64 ") is not a multiple of block size (%" PRId64 ")\n", __func__, info->name.data, (int) info->type, ggml_type_name(info->type), ne, ggml_blck_size(info->type)); - fclose(file); gguf_free(ctx); return NULL; } @@ -6929,7 +6911,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p *params.ctx = ggml_init(pdata); if (*params.ctx == NULL) { fprintf(stderr, "%s: failed to initialize context\n", __func__); - fclose(file); gguf_free(ctx); return NULL; } @@ -6948,7 +6929,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read tensor data\n", __func__); - fclose(file); ggml_free(ctx_data); gguf_free(ctx); return NULL; @@ -6987,7 +6967,6 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p if (!ok) { fprintf(stderr, "%s: failed to read the tensor data\n", __func__); - fclose(file); ggml_free(ctx_data); gguf_free(ctx); return NULL; @@ -6996,11 +6975,21 @@ struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_p ggml_set_no_alloc(ctx_data, params.no_alloc); } - fclose(file); - return ctx; } +struct gguf_context * gguf_init_from_file(const char * fname, struct gguf_init_params params) { + FILE * file = ggml_fopen(fname, "rb"); + if (!file) { + fprintf(stderr, "%s: failed to open '%s': '%s'\n", __func__, fname, strerror(errno)); + return NULL; + } + + struct gguf_context * result = gguf_init_from_file_impl(file, params); + fclose(file); + return result; +} + void gguf_free(struct gguf_context * ctx) { if (ctx == NULL) { return; @@ -7460,13 +7449,7 @@ void gguf_set_tensor_data(struct gguf_context * ctx, const char * name, const vo // fwrite(val, sizeof(char), size, file); //} -struct gguf_buf { - void * data; - size_t size; - size_t offset; -}; - -static struct gguf_buf gguf_buf_init(size_t size) { +struct gguf_buf gguf_buf_init(size_t size) { struct gguf_buf buf = { /*buf.data =*/ size == 0 ? 
NULL : GGML_CALLOC(1, size), /*buf.size =*/ size, @@ -7476,7 +7459,7 @@ static struct gguf_buf gguf_buf_init(size_t size) { return buf; } -static void gguf_buf_free(struct gguf_buf buf) { +void gguf_buf_free(struct gguf_buf buf) { if (buf.data) { GGML_FREE(buf.data); } @@ -7514,7 +7497,7 @@ static void gguf_bwrite_el(struct gguf_buf * buf, const void * val, size_t el_si buf->offset += el_size; } -static void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) { +void gguf_write_to_buf(const struct gguf_context * ctx, struct gguf_buf * buf, bool only_meta) { // write header gguf_bwrite_el(buf, &ctx->header.magic, sizeof(ctx->header.magic)); gguf_bwrite_el(buf, &ctx->header.version, sizeof(ctx->header.version)); diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt index daeed4564c1d1..2b5e5fd4abe95 100644 --- a/tests/CMakeLists.txt +++ b/tests/CMakeLists.txt @@ -129,6 +129,7 @@ llama_target_and_test(test-arg-parser.cpp) llama_target_and_test(test-chat-template.cpp) # llama_target_and_test(test-opt.cpp) # SLOW +llama_target_and_test(test-gguf.cpp) llama_target_and_test(test-backend-ops.cpp) llama_target_and_test(test-model-load-cancel.cpp LABEL "model") diff --git a/tests/test-gguf.cpp b/tests/test-gguf.cpp new file mode 100644 index 0000000000000..e5b4cb7b8af1c --- /dev/null +++ b/tests/test-gguf.cpp @@ -0,0 +1,1303 @@ +#include "ggml.h" +#include "ggml-backend.h" +#include "../ggml/src/ggml-impl.h" + +#include <algorithm> +#include <cstdint> +#include <cstdio> +#include <random> +#include <string> +#include <utility> +#include <vector> + +constexpr int offset_has_kv = 1000; +constexpr int offset_has_tensors = 2000; +constexpr int offset_has_data = 3000; + +enum handcrafted_file_type { + HANDCRAFTED_HEADER_BAD_MAGIC = 10, + HANDCRAFTED_HEADER_BAD_VERSION_1 = 20, + HANDCRAFTED_HEADER_BAD_VERSION_FUTURE = 30, + HANDCRAFTED_HEADER_BAD_N_TENSORS = 40, + HANDCRAFTED_HEADER_BAD_N_KV = 50, + HANDCRAFTED_HEADER_EMPTY = 800, + + HANDCRAFTED_KV_BAD_KEY_SIZE = 10 + offset_has_kv, + HANDCRAFTED_KV_BAD_TYPE = 20 + offset_has_kv, + HANDCRAFTED_KV_BAD_VALUE_SIZE = 30 + offset_has_kv, + HANDCRAFTED_KV_DUPLICATE_KEY = 40 + offset_has_kv, + HANDCRAFTED_KV_SUCCESS = 800 + offset_has_kv, + + HANDCRAFTED_TENSORS_BAD_NAME_SIZE = 10 + offset_has_tensors, + HANDCRAFTED_TENSORS_BAD_N_DIMS = 20 + offset_has_tensors, + HANDCRAFTED_TENSORS_BAD_SHAPE = 30 + offset_has_tensors, + HANDCRAFTED_TENSORS_NE_TOO_BIG = 40 + offset_has_tensors, + HANDCRAFTED_TENSORS_BAD_TYPE = 50 + offset_has_tensors, + HANDCRAFTED_TENSORS_BAD_OFFSET = 60 + offset_has_tensors, + HANDCRAFTED_TENSORS_DUPLICATE_NAME = 70 + offset_has_tensors, + HANDCRAFTED_TENSORS_BAD_ALIGNMENT = 80 + offset_has_tensors, + HANDCRAFTED_TENSORS_SUCCESS = 800 + offset_has_tensors, + HANDCRAFTED_TENSORS_CUSTOM_ALIGN = 810 + offset_has_tensors, + + HANDCRAFTED_DATA_NOT_ENOUGH_DATA = 10 + offset_has_data, + HANDCRAFTED_DATA_BAD_ALIGNMENT = 20 + offset_has_data, + HANDCRAFTED_DATA_SUCCESS = 800 + offset_has_data, + HANDCRAFTED_DATA_CUSTOM_ALIGN = 810 + offset_has_data, +}; + +std::string handcrafted_file_type_name(const enum handcrafted_file_type hft) { + switch (hft) { + case HANDCRAFTED_HEADER_BAD_MAGIC: return "HEADER_BAD_MAGIC"; + case HANDCRAFTED_HEADER_BAD_VERSION_1: return "HEADER_BAD_VERSION_1"; + case HANDCRAFTED_HEADER_BAD_VERSION_FUTURE: return "HEADER_BAD_VERSION_FUTURE"; + case HANDCRAFTED_HEADER_BAD_N_KV: return "HEADER_BAD_N_KV"; + case HANDCRAFTED_HEADER_BAD_N_TENSORS: return "HEADER_BAD_N_TENSORS"; + case HANDCRAFTED_HEADER_EMPTY: return "HEADER_EMPTY"; + + case
HANDCRAFTED_KV_BAD_KEY_SIZE: return "KV_BAD_KEY_SIZE"; + case HANDCRAFTED_KV_BAD_TYPE: return "KV_BAD_TYPE"; + case HANDCRAFTED_KV_BAD_VALUE_SIZE: return "KV_BAD_VALUE_SIZE"; + case HANDCRAFTED_KV_DUPLICATE_KEY: return "KV_DUPLICATE_KEY"; + case HANDCRAFTED_KV_SUCCESS: return "KV_SUCCESS"; + + case HANDCRAFTED_TENSORS_BAD_NAME_SIZE: return "TENSORS_BAD_NAME_SIZE"; + case HANDCRAFTED_TENSORS_BAD_N_DIMS: return "TENSORS_BAD_N_DIMS"; + case HANDCRAFTED_TENSORS_BAD_SHAPE: return "TENSORS_BAD_SHAPE"; + case HANDCRAFTED_TENSORS_NE_TOO_BIG: return "TENSORS_NE_TOO_BIG"; + case HANDCRAFTED_TENSORS_BAD_TYPE: return "TENSORS_BAD_TYPE"; + case HANDCRAFTED_TENSORS_BAD_OFFSET: return "TENSORS_BAD_OFFSET"; + case HANDCRAFTED_TENSORS_DUPLICATE_NAME: return "TENSORS_DUPLICATE_NAME"; + case HANDCRAFTED_TENSORS_BAD_ALIGNMENT: return "TENSORS_BAD_ALIGNMENT"; + case HANDCRAFTED_TENSORS_SUCCESS: return "TENSORS_SUCCESS"; + case HANDCRAFTED_TENSORS_CUSTOM_ALIGN: return "TENSORS_CUSTOM_ALIGN"; + + case HANDCRAFTED_DATA_NOT_ENOUGH_DATA: return "DATA_NOT_ENOUGH_DATA"; + case HANDCRAFTED_DATA_BAD_ALIGNMENT: return "DATA_BAD_ALIGNMENT"; + case HANDCRAFTED_DATA_SUCCESS: return "DATA_SUCCESS"; + case HANDCRAFTED_DATA_CUSTOM_ALIGN: return "DATA_CUSTOM_ALIGN"; + } + GGML_ABORT("fatal error"); +} + +static bool expect_context_not_null(const enum handcrafted_file_type hft) { + if (hft < offset_has_kv) { + return hft >= HANDCRAFTED_HEADER_EMPTY; + } + if (hft < offset_has_tensors) { + return hft >= HANDCRAFTED_KV_SUCCESS; + } + if (hft < offset_has_data) { + return hft >= HANDCRAFTED_TENSORS_SUCCESS; + } + return hft >= HANDCRAFTED_DATA_SUCCESS; +} + +typedef std::pair<enum ggml_type, std::array<int64_t, GGML_MAX_DIMS>> tensor_config_t; + +std::vector<tensor_config_t> get_tensor_configs(std::mt19937 & rng) { + std::vector<tensor_config_t> tensor_configs; + tensor_configs.reserve(100); + + for (int i = 0; i < 100; ++i) { + const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT); + if (ggml_type_size(type) == 0) { + continue; + } + + std::array<int64_t, GGML_MAX_DIMS> shape = {1, 1, 1, 1}; + shape[0] = (1 + rng() % 10) * ggml_blck_size(type); + const int n_dims = 1 + rng() % GGML_MAX_DIMS; + for (int i = 1; i < n_dims; ++i) { + shape[i] = 1 + rng() % 10; + } + + tensor_configs.push_back(std::make_pair(type, shape)); + } + + return tensor_configs; +} + +std::vector<std::pair<enum gguf_type, enum gguf_type>> get_kv_types(std::mt19937 rng) { + std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types; + kv_types.reserve(100); + + for (int i = 0; i < 100; ++i) { + const gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT); + + if (type == GGUF_TYPE_ARRAY) { + const gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT); + if (type_arr == GGUF_TYPE_ARRAY) { + continue; + } + kv_types.push_back(std::make_pair(type, type_arr)); + continue; + } + + kv_types.push_back(std::make_pair(type, gguf_type(-1))); + } + std::shuffle(kv_types.begin(), kv_types.end(), rng); + + return kv_types; +} + +static void helper_write(const void * data, const size_t nbytes, FILE * file) { + GGML_ASSERT(fwrite(data, 1, nbytes, file) == nbytes); +} + +static FILE * get_handcrafted_file(const unsigned int seed, const enum handcrafted_file_type hft, const int extra_bytes = 0) { + FILE * file = tmpfile(); + + std::mt19937 rng(seed); + + if (hft == HANDCRAFTED_HEADER_BAD_MAGIC) { + const char bad_magic[4] = {'F', 'U', 'G', 'G'}; + helper_write(bad_magic, sizeof(bad_magic), file); + } else { + helper_write(GGUF_MAGIC, 4, file); + } + + if (hft == HANDCRAFTED_HEADER_BAD_VERSION_1) { + const uint32_t version = 1; + helper_write(&version, sizeof(version), file); + } else if (hft == HANDCRAFTED_HEADER_BAD_VERSION_FUTURE) { + const uint32_t
version = GGUF_VERSION + 1; + helper_write(&version, sizeof(version), file); + } else { + const uint32_t version = GGUF_VERSION; + helper_write(&version, sizeof(version), file); + } + + std::vector<tensor_config_t> tensor_configs; + if (hft >= offset_has_tensors) { + tensor_configs = get_tensor_configs(rng); + } + + if (hft == HANDCRAFTED_HEADER_BAD_N_TENSORS) { + const uint64_t n_tensors = -1; + helper_write(&n_tensors, sizeof(n_tensors), file); + } else { + const uint64_t n_tensors = tensor_configs.size(); + helper_write(&n_tensors, sizeof(n_tensors), file); + } + + std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types; + if (hft >= offset_has_kv) { + kv_types = get_kv_types(rng); + } + { + uint64_t n_kv = kv_types.size(); + if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) { + n_kv += 1; + } else if (hft == HANDCRAFTED_HEADER_BAD_N_KV) { + n_kv = -1; + } + helper_write(&n_kv, sizeof(n_kv), file); + } + + if (hft < offset_has_kv) { + for (int i = 0; i < extra_bytes; ++i) { + const char tmp = 0; + helper_write(&tmp, sizeof(tmp), file); + } + rewind(file); + return file; + } + + for (int i = 0; i < int(kv_types.size()); ++i) { + const enum gguf_type type = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].first); + const enum gguf_type type_arr = gguf_type(hft == HANDCRAFTED_KV_BAD_TYPE ? -1 : kv_types[i].second); + + const std::string key = "my_key_" + std::to_string((hft == HANDCRAFTED_KV_DUPLICATE_KEY ? i/2 : i)); + + if (hft == HANDCRAFTED_KV_BAD_KEY_SIZE) { + const uint64_t n = -1; + helper_write(&n, sizeof(n), file); + } else { + const uint64_t n = key.length(); + helper_write(&n, sizeof(n), file); + } + helper_write(key.data(), key.length(), file); + + { + const int32_t type32 = int32_t(type); + helper_write(&type32, sizeof(type32), file); + } + + uint32_t data[16]; + for (int j = 0; j < 16; ++j) { + data[j] = rng(); + if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) { + data[j] |= 0x01010101; // avoid random null-termination of string + } + } + + if (type == GGUF_TYPE_STRING) { + const uint64_t n = rng() % sizeof(data); + helper_write(&n, sizeof(n), file); + helper_write(data, n, file); + continue; + } + + if (type == GGUF_TYPE_ARRAY) { + { + const int32_t type32 = int32_t(type_arr); + helper_write(&type32, sizeof(type32), file); + } + if (type_arr == GGUF_TYPE_STRING) { + const uint64_t nstr = rng() % (16 + 1); + helper_write(&nstr, sizeof(nstr), file); + for (uint64_t istr = 0; istr < nstr; ++istr) { + const uint64_t n = rng() % (sizeof(uint32_t) + 1); + helper_write(&n, sizeof(n), file); + helper_write(&data[istr], n, file); + } + continue; + } + const size_t type_size = gguf_type_size(type_arr); + const uint64_t n = (rng() % sizeof(data)) / type_size; + helper_write(&n, sizeof(n), file); + helper_write(&data, n*type_size, file); + continue; + } + + size_t type_size = hft == HANDCRAFTED_KV_BAD_TYPE ?
1 : gguf_type_size(type); + if (hft == HANDCRAFTED_KV_BAD_VALUE_SIZE) { + type_size += rng() % 3; + } + helper_write(data, type_size, file); + } + + if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) { + const std::string key = "general.alignment"; + { + const uint64_t n = key.length(); + helper_write(&n, sizeof(n), file); + } + helper_write(key.data(), key.length(), file); + + const int32_t type = gguf_type(GGUF_TYPE_UINT32); + helper_write(&type, sizeof(type), file); + + const uint32_t alignment = GGUF_DEFAULT_ALIGNMENT + 1; + helper_write(&alignment, sizeof(alignment), file); + } + + if (hft < offset_has_tensors) { + for (int i = 0; i < extra_bytes; ++i) { + const char tmp = 0; + helper_write(&tmp, sizeof(tmp), file); + } + rewind(file); + return file; + } + + uint32_t alignment = GGUF_DEFAULT_ALIGNMENT; + if (hft == HANDCRAFTED_TENSORS_BAD_ALIGNMENT || hft == HANDCRAFTED_DATA_BAD_ALIGNMENT) { + alignment -= 1; + } else if (hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN) { + alignment += 1; + } + + uint64_t offset = 0; + for (int i = 0; i < int(tensor_configs.size()); ++i) { + const ggml_type type = tensor_configs[i].first; + const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second; + + std::string name = "my_tensor"; + if (hft != HANDCRAFTED_TENSORS_DUPLICATE_NAME) { + name += "_" + std::to_string(i); + } + if (hft == HANDCRAFTED_TENSORS_BAD_NAME_SIZE) { + name += "_with_a_very_long_name_which_is_longer_than_what_is_allowed_for_ggml_tensors"; + GGML_ASSERT(name.length() >= GGML_MAX_NAME); + } + { + const uint64_t n = name.length(); + helper_write(&n, sizeof(n), file); + } + helper_write(name.data(), name.length(), file); + + uint32_t n_dims = hft == HANDCRAFTED_TENSORS_NE_TOO_BIG ? 2 : 1; + for (int i = GGML_MAX_DIMS-1; i >= 1; --i) { + if (shape[i] != 1) { + n_dims = i + 1; + break; + } + } + if (hft == HANDCRAFTED_TENSORS_BAD_N_DIMS) { + const uint32_t n_dims_bad = GGML_MAX_DIMS + 1; + helper_write(&n_dims_bad, sizeof(n_dims_bad), file); + } else { + helper_write(&n_dims, sizeof(n_dims), file); + } + + if (hft == HANDCRAFTED_TENSORS_BAD_SHAPE) { + for (uint32_t j = 0; j < n_dims; ++j) { + const int64_t bad_dim = -1; + helper_write(&bad_dim, sizeof(bad_dim), file); + } + } else if (hft == HANDCRAFTED_TENSORS_NE_TOO_BIG) { + for (uint32_t j = 0; j < n_dims; ++j) { + const int64_t big_dim = 4*int64_t(INT32_MAX); + helper_write(&big_dim, sizeof(big_dim), file); + } + } else { + helper_write(shape.data(), n_dims*sizeof(int64_t), file); + } + + { + const int32_t type32 = hft == HANDCRAFTED_TENSORS_BAD_TYPE ?
-1 : int32_t(type); + helper_write(&type32, sizeof(type32), file); + } + + if (hft == HANDCRAFTED_TENSORS_BAD_OFFSET) { + const uint64_t bad_offset = -1; + helper_write(&bad_offset, sizeof(bad_offset), file); + } else { + helper_write(&offset, sizeof(offset), file); + } + + int64_t ne = shape[0]; + for (uint32_t i = 1; i < n_dims; ++i) { + ne *= shape[i]; + } + offset += GGML_PAD(ggml_row_size(type, ne), alignment); + } + + const uint32_t alignment_overshoot = ftell(file) % alignment; + if (alignment_overshoot != 0) { + for (size_t i = alignment_overshoot; i < alignment; ++i) { + const char pad = 0; + helper_write(&pad, sizeof(pad), file); + } + } + + if (hft >= offset_has_data) { + rng.seed(seed + 1); + uint64_t nbytes = offset; + if (hft == HANDCRAFTED_DATA_NOT_ENOUGH_DATA) { + nbytes -= 1; + } + for (uint64_t i = 0; i < nbytes; ++i) { + const uint8_t random_byte = i % 256; + helper_write(&random_byte, sizeof(random_byte), file); + } + } + + for (int i = 0; i < extra_bytes; ++i) { + const char tmp = 0; + helper_write(&tmp, sizeof(tmp), file); + } + rewind(file); + return file; +} + +static bool handcrafted_check_header(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_kv, const bool has_tensors, const bool alignment_defined) { + if (!gguf_ctx) { + return false; + } + + std::mt19937 rng(seed); + + std::vector<tensor_config_t> tensor_configs; + if (has_tensors) { + tensor_configs = get_tensor_configs(rng); + } + std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types; + if (has_kv) { + kv_types = get_kv_types(rng); + } + + bool ok = true; + + if (gguf_get_version(gguf_ctx) != GGUF_VERSION) { + ok = false; + } + if (gguf_get_n_tensors(gguf_ctx) != int(tensor_configs.size())) { + ok = false; + } + if (gguf_get_n_kv(gguf_ctx) != int(alignment_defined ? kv_types.size() + 1 : kv_types.size())) { + ok = false; + } + + return ok; +} + +static bool handcrafted_check_kv(const gguf_context * gguf_ctx, const unsigned int seed, const bool has_tensors, const bool alignment_defined) { + if (!gguf_ctx) { + return false; + } + + std::mt19937 rng(seed); + + std::vector<tensor_config_t> tensor_configs; + if (has_tensors) { + tensor_configs = get_tensor_configs(rng); + } + + std::vector<std::pair<enum gguf_type, enum gguf_type>> kv_types = get_kv_types(rng); + + bool ok = true; + + for (int i = 0; i < int(kv_types.size()); ++i) { + const enum gguf_type type = gguf_type(kv_types[i].first); + const enum gguf_type type_arr = gguf_type(kv_types[i].second); + + const std::string key = "my_key_" + std::to_string(i); + + uint32_t data[16]; + for (int j = 0; j < 16; ++j) { + data[j] = rng(); + if (type == GGUF_TYPE_STRING || type_arr == GGUF_TYPE_STRING) { + data[j] |= 0x01010101; // avoid random null-termination of string + } + } + + const char * data8 = reinterpret_cast<const char *>(data); + const int id = gguf_find_key(gguf_ctx, key.c_str()); + + if (type == GGUF_TYPE_STRING) { + const char * str = gguf_get_val_str(gguf_ctx, id); + const uint64_t n = strlen(str); + const uint64_t n_expected = rng() % sizeof(data); + if (n != n_expected) { + ok = false; + continue; + } + if (!std::equal(str, str + n, data8)) { + ok = false; + } + continue; + } + + if (type == GGUF_TYPE_ARRAY) { + const size_t type_size = gguf_type_size(type_arr); + const uint64_t arr_n = gguf_get_arr_n(gguf_ctx, id); + + if (type_arr == GGUF_TYPE_STRING) { + const uint64_t nstr_expected = rng() % (16 + 1); + if (arr_n != nstr_expected) { + ok = false; + continue; + } + for (uint64_t istr = 0; istr < nstr_expected; ++istr) { + const char * str = gguf_get_arr_str(gguf_ctx, id, istr); + const uint64_t n = strlen(str); + const uint64_t n_expected =
rng() % (sizeof(uint32_t) + 1); + + if (n != n_expected) { + ok = false; + continue; + } + const char * str_expected = reinterpret_cast<const char *>(&data[istr]); + if (strncmp(str, str_expected, n) != 0) { + ok = false; + continue; + } + } + continue; + } + + const uint64_t arr_n_expected = (rng() % sizeof(data)) / type_size; + if (arr_n != arr_n_expected) { + ok = false; + continue; + } + + const char * data_gguf = reinterpret_cast<const char *>(gguf_get_arr_data(gguf_ctx, id)); + if (!std::equal(data8, data8 + arr_n*type_size, data_gguf)) { + ok = false; + } + continue; + } + + const char * data_gguf = reinterpret_cast<const char *>(gguf_get_val_data(gguf_ctx, id)); + if (!std::equal(data8, data8 + gguf_type_size(type), data_gguf)) { + ok = false; + } + } + + const uint32_t expected_alignment = alignment_defined ? GGUF_DEFAULT_ALIGNMENT + 1 : GGUF_DEFAULT_ALIGNMENT; + if (gguf_get_alignment(gguf_ctx) != expected_alignment) { + ok = false; + } + + return ok; +} + +static bool handcrafted_check_tensors(const gguf_context * gguf_ctx, const unsigned int seed) { + if (!gguf_ctx) { + return false; + } + + std::mt19937 rng(seed); + + std::vector<tensor_config_t> tensor_configs = get_tensor_configs(rng); + + // Call get_kv_types to get the same RNG state: + get_kv_types(rng); + + bool ok = true; + + const int id_alignment = gguf_find_key(gguf_ctx, "general.alignment"); + const uint32_t alignment = id_alignment >= 0 ? gguf_get_val_u32(gguf_ctx, id_alignment) : GGUF_DEFAULT_ALIGNMENT; + + uint64_t expected_offset = 0; + for (int i = 0; i < int(tensor_configs.size()); ++i) { + const ggml_type type = tensor_configs[i].first; + const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second; + + const std::string name = "my_tensor_" + std::to_string(i); + const int id = gguf_find_tensor(gguf_ctx, name.c_str()); + + if (id >= 0) { + if (std::string(gguf_get_tensor_name(gguf_ctx, id)) != name) { + ok = false; + } + + if (gguf_get_tensor_type(gguf_ctx, id) != type) { + ok = false; + } + } else { + ok = false; + continue; + } + + const size_t offset = gguf_get_tensor_offset(gguf_ctx, id); + + if (offset != expected_offset) { + ok = false; + } + + int64_t ne = shape[0]; + for (size_t j = 1; j < GGML_MAX_DIMS; ++j) { + ne *= shape[j]; + } + expected_offset += GGML_PAD(ggml_row_size(type, ne), alignment); + } + + return ok; +} + +static bool handcrafted_check_tensor_data(const gguf_context * gguf_ctx, const unsigned int seed, FILE * file) { + if (!gguf_ctx) { + return false; + } + + std::mt19937 rng(seed); + + std::vector<tensor_config_t> tensor_configs = get_tensor_configs(rng); + + bool ok = true; + + const uint32_t alignment = GGUF_DEFAULT_ALIGNMENT; + + for (int i = 0; i < int(tensor_configs.size()); ++i) { + const ggml_type type = tensor_configs[i].first; + const std::array<int64_t, GGML_MAX_DIMS> shape = tensor_configs[i].second; + + int64_t ne = shape[0]; + for (size_t j = 1; j < GGML_MAX_DIMS; ++j) { + ne *= shape[j]; + } + const size_t size = ggml_row_size(type, ne); + + const std::string name = "my_tensor_" + std::to_string(i); + const size_t offset = gguf_get_tensor_offset(gguf_ctx, gguf_find_tensor(gguf_ctx, name.c_str())); + + std::vector<uint8_t> data(size); + GGML_ASSERT(fseek(file, gguf_get_data_offset(gguf_ctx) + offset, SEEK_SET) == 0); + GGML_ASSERT(fread(data.data(), 1, size, file) == size); + + for (size_t j = 0; j < size; ++j) { + const uint8_t expected_byte = (j + offset) % 256; + if (data[j] != expected_byte) { + ok = false; + } + } + } + + return ok; +} + +static std::pair<int, int> test_handcrafted_file(const unsigned int seed) { + int npass = 0; + int ntest = 0; + + const std::vector<handcrafted_file_type> hfts = {
HANDCRAFTED_HEADER_BAD_MAGIC, + HANDCRAFTED_HEADER_BAD_VERSION_1, + // HANDCRAFTED_HEADER_BAD_VERSION_FUTURE, // FIXME + HANDCRAFTED_HEADER_BAD_N_KV, + HANDCRAFTED_HEADER_BAD_N_TENSORS, + HANDCRAFTED_HEADER_EMPTY, + + HANDCRAFTED_KV_BAD_KEY_SIZE, + HANDCRAFTED_KV_BAD_TYPE, + HANDCRAFTED_KV_BAD_VALUE_SIZE, + // HANDCRAFTED_KV_DUPLICATE_KEY, // FIXME + HANDCRAFTED_KV_SUCCESS, + + HANDCRAFTED_TENSORS_BAD_NAME_SIZE, + HANDCRAFTED_TENSORS_BAD_N_DIMS, + HANDCRAFTED_TENSORS_BAD_SHAPE, + HANDCRAFTED_TENSORS_NE_TOO_BIG, + HANDCRAFTED_TENSORS_BAD_TYPE, + // HANDCRAFTED_TENSORS_BAD_OFFSET, // FIXME + HANDCRAFTED_TENSORS_DUPLICATE_NAME, + // HANDCRAFTED_TENSORS_BAD_ALIGNMENT, // FIXME + HANDCRAFTED_TENSORS_SUCCESS, + HANDCRAFTED_TENSORS_CUSTOM_ALIGN, + + HANDCRAFTED_DATA_NOT_ENOUGH_DATA, + // HANDCRAFTED_DATA_BAD_ALIGNMENT, // FIXME + HANDCRAFTED_DATA_SUCCESS, + HANDCRAFTED_DATA_CUSTOM_ALIGN, + }; + + for (enum handcrafted_file_type hft : hfts) { + printf("%s: handcrafted_file_type=%s\n", __func__, handcrafted_file_type_name(hft).c_str()); + FILE * file = get_handcrafted_file(seed, hft); + +#ifdef _WIN32 + if (!file) { + printf("%s: failed to create tmpfile(), needs elevated privileges on Windows\n", __func__); + printf("%s: skipping tests\n", __func__); + continue; + } +#else + GGML_ASSERT(file); +#endif // _WIN32 + + struct ggml_context * ctx = nullptr; + struct gguf_init_params gguf_params = { + /*no_alloc =*/ false, + /*ctx =*/ hft >= offset_has_data ? &ctx : nullptr, + }; + struct gguf_context * gguf_ctx = gguf_init_from_file_impl(file, gguf_params); + + if (expect_context_not_null(hft)) { + printf("%s: - context_not_null: ", __func__); + } else { + printf("%s: - context_null: ", __func__); + } + if (bool(gguf_ctx) == expect_context_not_null(hft)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + if (false && hft >= offset_has_data && !expect_context_not_null(hft)) { // FIXME + printf("%s: - no_dangling_ggml_context_pointer: ", __func__); + if (ctx) { + printf("\033[1;31mFAIL\033[0m\n"); + } else { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } + ntest++; + } + + if (false && expect_context_not_null(hft)) { // FIXME + FILE * file_eb = get_handcrafted_file(seed, hft, /*extra_bytes =*/ 1); + struct gguf_context * gguf_ctx_eb = gguf_init_from_file_impl(file_eb, gguf_params); + + printf("%s: - context_null_with_extra_bytes: ", __func__); + if (gguf_ctx_eb) { + printf("\033[1;31mFAIL\033[0m\n"); + } else { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } + ntest++; + + gguf_free(gguf_ctx_eb); + fclose(file_eb); + } + + const bool alignment_defined = hft == HANDCRAFTED_TENSORS_CUSTOM_ALIGN || hft == HANDCRAFTED_DATA_CUSTOM_ALIGN; + + if (expect_context_not_null(hft)) { + printf("%s: - check_header: ", __func__); + if (handcrafted_check_header(gguf_ctx, seed, hft >= offset_has_kv, hft >= offset_has_tensors, alignment_defined)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + } + + if (expect_context_not_null(hft) && hft >= offset_has_kv) { + printf("%s: - check_kv: ", __func__); + if (handcrafted_check_kv(gguf_ctx, seed, hft >= offset_has_tensors, alignment_defined)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + } + + if (expect_context_not_null(hft) && hft >= offset_has_tensors) { + printf("%s: - check_tensors: ", __func__); + if (handcrafted_check_tensors(gguf_ctx, seed)) { + printf("\033[1;32mOK\033[0m\n"); + npass++;
+ } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + } + + if (expect_context_not_null(hft) && hft >= offset_has_data) { + printf("%s: - check_tensor_data: ", __func__); + if (handcrafted_check_tensor_data(gguf_ctx, seed, file)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + } + + if (gguf_ctx) { + ggml_free(ctx); + gguf_free(gguf_ctx); + } + fclose(file); + printf("\n"); + } + + return std::make_pair(npass, ntest); +} + +struct random_gguf_context_result { + struct gguf_context * gguf_ctx; + struct ggml_context * ctx; + ggml_backend_buffer_t buffer; +}; + +static struct random_gguf_context_result get_random_gguf_context(ggml_backend_t backend, const unsigned int seed) { + std::mt19937 rng(seed); + + struct gguf_context * gguf_ctx = gguf_init_empty(); + + for (int i = 0; i < 256; ++i) { + const std::string key = "my_key_" + std::to_string(rng() % 1024); + const enum gguf_type type = gguf_type(rng() % GGUF_TYPE_COUNT); + + if (type == GGUF_TYPE_STRING || type == GGUF_TYPE_ARRAY) { + continue; // FIXME memory leak + } + + switch (type) { + case GGUF_TYPE_UINT8: gguf_set_val_u8 (gguf_ctx, key.c_str(), rng() % (1 << 7)); break; + case GGUF_TYPE_INT8: gguf_set_val_i8 (gguf_ctx, key.c_str(), rng() % (1 << 7) - (1 << 6)); break; + case GGUF_TYPE_UINT16: gguf_set_val_u16 (gguf_ctx, key.c_str(), rng() % (1 << 15)); break; + case GGUF_TYPE_INT16: gguf_set_val_i16 (gguf_ctx, key.c_str(), rng() % (1 << 15) - (1 << 14)); break; + case GGUF_TYPE_UINT32: gguf_set_val_u32 (gguf_ctx, key.c_str(), rng()); break; + case GGUF_TYPE_INT32: gguf_set_val_i32 (gguf_ctx, key.c_str(), rng() - (1 << 30)); break; + case GGUF_TYPE_FLOAT32: gguf_set_val_f32 (gguf_ctx, key.c_str(), rng() % 1024 - 512); break; + case GGUF_TYPE_BOOL: gguf_set_val_bool(gguf_ctx, key.c_str(), rng() % 2 == 0); break; + case GGUF_TYPE_STRING: gguf_set_val_str (gguf_ctx, key.c_str(), std::to_string(rng()).c_str()); break; + case GGUF_TYPE_UINT64: gguf_set_val_u64 (gguf_ctx, key.c_str(), rng()); break; + case GGUF_TYPE_INT64: gguf_set_val_i64 (gguf_ctx, key.c_str(), rng() - (1 << 30)); break; + case GGUF_TYPE_FLOAT64: gguf_set_val_f64 (gguf_ctx, key.c_str(), rng() % 1024 - 512); break; + case GGUF_TYPE_ARRAY: { + const enum gguf_type type_arr = gguf_type(rng() % GGUF_TYPE_COUNT); + const uint64_t ne = rng() % 1024; + + switch (type_arr) { + case GGUF_TYPE_UINT8: + case GGUF_TYPE_INT8: + case GGUF_TYPE_UINT16: + case GGUF_TYPE_INT16: + case GGUF_TYPE_UINT32: + case GGUF_TYPE_INT32: + case GGUF_TYPE_FLOAT32: + case GGUF_TYPE_BOOL: + case GGUF_TYPE_UINT64: + case GGUF_TYPE_INT64: + case GGUF_TYPE_FLOAT64: { + const size_t nbytes = ne*gguf_type_size(type_arr); + std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t)); + for (size_t j = 0; j < random_data.size(); ++j) { + random_data[j] = rng(); + } + gguf_set_arr_data(gguf_ctx, key.c_str(), type_arr, random_data.data(), ne); + } break; + case GGUF_TYPE_STRING: { + std::vector<std::string> data_cpp(ne); + std::vector<const char *> data_c(ne); + for (size_t j = 0; j < data_cpp.size(); ++j) { + data_cpp[j] = std::to_string(rng()); + data_c[j] = data_cpp[j].c_str(); + } + gguf_set_arr_str(gguf_ctx, key.c_str(), data_c.data(), ne); + } break; + case GGUF_TYPE_ARRAY: { + break; // not supported + } + case GGUF_TYPE_COUNT: + default: { + GGML_ABORT("fatal error"); + } break; + } + } break; + case GGUF_TYPE_COUNT: + default: { + GGML_ABORT("fatal error"); + } break; + } + } + + struct ggml_init_params ggml_params = { + /*.mem_size =*/
256*ggml_tensor_overhead(), + /*.mem_buffer =*/ nullptr, + /*.no_alloc =*/ true, + }; + struct ggml_context * ctx = ggml_init(ggml_params); + + for (int i = 0; i < 256; ++i) { + const std::string name = "my_tensor_" + std::to_string(i); + const enum ggml_type type = ggml_type(rng() % GGML_TYPE_COUNT); + const size_t type_size = ggml_type_size(type); + + if (type_size == 0) { + continue; + } + + const int n_dims = 1 + rng() % GGML_MAX_DIMS; + int64_t ne[GGML_MAX_DIMS]; + ne[0] = (1 + rng() % 10) * ggml_blck_size(type); + for (int j = 1; j < n_dims; ++j) { + ne[j] = 1 + rng() % 10; + } + + struct ggml_tensor * tensor = ggml_new_tensor(ctx, type, n_dims, ne); + ggml_set_name(tensor, name.c_str()); + } + + ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors(ctx, backend); + for (struct ggml_tensor * t = ggml_get_first_tensor(ctx); t != nullptr; t = ggml_get_next_tensor(ctx, t)) { + const size_t nbytes = ggml_nbytes(t); + std::vector<uint32_t> random_data((nbytes + sizeof(uint32_t) - 1) / sizeof(uint32_t)); + for (size_t j = 0; j < random_data.size(); ++j) { + random_data[j] = rng(); + } + ggml_backend_tensor_set(t, random_data.data(), 0, nbytes); + + gguf_add_tensor(gguf_ctx, t); + } + + return {gguf_ctx, ctx, buf}; +} + +static bool all_kv_in_other(const gguf_context * ctx, const gguf_context * other) { + bool ok = true; + + const int n_kv = gguf_get_n_kv(ctx); + for (int id = 0; id < n_kv; ++id) { + const char * name = gguf_get_key(ctx, id); + + const int idx_other = gguf_find_key(other, name); + if (idx_other < 0) { + ok = false; + continue; + } + + const gguf_type type = gguf_get_kv_type(ctx, id); + if (type != gguf_get_kv_type(other, idx_other)) { + ok = false; + continue; + } + + if (type == GGUF_TYPE_ARRAY) { + const int arr_n = gguf_get_arr_n(ctx, id); + if (arr_n != gguf_get_arr_n(other, idx_other)) { + ok = false; + continue; + } + + const gguf_type type_arr = gguf_get_arr_type(ctx, id); + if (type_arr != gguf_get_arr_type(other, idx_other)) { + ok = false; + continue; + } + + if (type_arr == GGUF_TYPE_STRING) { + for (int arr_i = 0; arr_i < arr_n; ++arr_i) { + const std::string str = gguf_get_arr_str(ctx, id, arr_i); + const std::string str_other = gguf_get_arr_str(other, idx_other, arr_i); + if (str != str_other) { + ok = false; + } + } + continue; + } + + const char * data = reinterpret_cast<const char *>(gguf_get_arr_data(ctx, id)); + const char * data_other = reinterpret_cast<const char *>(gguf_get_arr_data(other, idx_other)); + if (!std::equal(data, data + arr_n*gguf_type_size(type_arr), data_other)) { + ok = false; + } + continue; + } + + if (type == GGUF_TYPE_STRING) { + const std::string str = gguf_get_val_str(ctx, id); + const std::string str_other = gguf_get_val_str(other, idx_other); + if (str != str_other) { + ok = false; + } + continue; + } + + const char * data = reinterpret_cast<const char *>(gguf_get_val_data(ctx, id)); + const char * data_other = reinterpret_cast<const char *>(gguf_get_val_data(other, idx_other)); + if (!std::equal(data, data + gguf_type_size(type), data_other)) { + ok = false; + } + } + + return ok; +} + +static bool all_tensors_in_other(const gguf_context * ctx, const gguf_context * other) { + bool ok = true; + + const int n_tensors = gguf_get_n_tensors(ctx); + for (int id = 0; id < n_tensors; ++id) { + const std::string name = gguf_get_tensor_name(ctx, id); + + const int idx_other = gguf_find_tensor(other, name.c_str()); + if (id != idx_other) { + ok = false; + if (idx_other < 0) { + continue; + } + } + + const ggml_type type = gguf_get_tensor_type(ctx, id); + if (type != gguf_get_tensor_type(other, id))
{ + ok = false; + } + + const size_t offset = gguf_get_tensor_offset(ctx, id); + if (offset != gguf_get_tensor_offset(other, id)) { + ok = false; + } + } + + return ok; +} + +static bool same_tensor_data(const struct ggml_context * orig, const struct ggml_context * read) { + bool ok = true; + + struct ggml_tensor * t_orig = ggml_get_first_tensor(orig); + struct ggml_tensor * t_read = ggml_get_first_tensor(read); + while (t_orig) { + if (!t_read) { + ok = false; + break; + } + + const size_t nbytes = ggml_nbytes(t_orig); + if (ggml_nbytes(t_read) != nbytes) { + ok = false; + break; + } + std::vector<uint8_t> data_orig(nbytes); + ggml_backend_tensor_get(t_orig, data_orig.data(), 0, nbytes); + if (!std::equal(data_orig.data(), data_orig.data() + nbytes, reinterpret_cast<const uint8_t *>(t_read->data))) { + ok = false; + } + + t_orig = ggml_get_next_tensor(orig, t_orig); + t_read = ggml_get_next_tensor(read, t_read); + } + if (t_read) { + ok = false; + } + + return ok; +} + +static std::pair<int, int> test_roundtrip(ggml_backend_dev_t dev, const unsigned int seed, const bool only_meta) { + FILE * file = tmpfile(); +#ifdef _WIN32 + if (!file) { + printf("%s: failed to create tmpfile(), needs elevated privileges on Windows\n", __func__); + printf("%s: skipping tests\n", __func__); + return std::make_pair(0, 0); + } +#else + GGML_ASSERT(file); +#endif // _WIN32 + + if (ggml_backend_dev_type(dev) != GGML_BACKEND_DEVICE_TYPE_CPU) { + return std::make_pair(0, 0); // FIXME + } + + ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); + printf("%s: device=%s, backend=%s, only_meta=%s\n", + __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend), only_meta ? "yes" : "no"); + + int npass = 0; + int ntest = 0; + + struct gguf_context * gguf_ctx_0; + struct ggml_context * ctx_0; + ggml_backend_buffer_t bbuf; + { + struct random_gguf_context_result result = get_random_gguf_context(backend, seed); + gguf_ctx_0 = result.gguf_ctx; + ctx_0 = result.ctx; + bbuf = result.buffer; + } + + struct gguf_buf gbuf = gguf_buf_init(16 * 1024); + gguf_write_to_buf(gguf_ctx_0, &gbuf, only_meta); + helper_write(gbuf.data, gbuf.offset, file); + rewind(file); + + struct ggml_context * ctx_1 = nullptr; + struct gguf_init_params gguf_params = { + /*no_alloc =*/ false, + /*ctx =*/ only_meta ?
nullptr : &ctx_1, + }; + struct gguf_context * gguf_ctx_1 = gguf_init_from_file_impl(file, gguf_params); + + printf("%s: same_version: ", __func__); + if (gguf_get_version(gguf_ctx_0) == gguf_get_version(gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: same_n_kv: ", __func__); + if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: same_n_tensors: ", __func__); + if (gguf_get_n_tensors(gguf_ctx_0) == gguf_get_n_tensors(gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_orig_kv_in_read: ", __func__); + if (all_kv_in_other(gguf_ctx_0, gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_read_kv_in_orig: ", __func__); + if (all_kv_in_other(gguf_ctx_1, gguf_ctx_0)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_orig_tensors_in_read: ", __func__); + if (all_tensors_in_other(gguf_ctx_0, gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_read_tensors_in_orig: ", __func__); + if (all_tensors_in_other(gguf_ctx_1, gguf_ctx_0)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + if (!only_meta) { + printf("%s: same_tensor_data: ", __func__); + if (same_tensor_data(ctx_0, ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + } + + ggml_backend_buffer_free(bbuf); + ggml_free(ctx_0); + ggml_free(ctx_1); + gguf_free(gguf_ctx_0); + gguf_free(gguf_ctx_1); + gguf_buf_free(gbuf); + ggml_backend_free(backend); + GGML_ASSERT(fclose(file) == 0); + + printf("\n"); + return std::make_pair(npass, ntest); +} + +static std::pair<int, int> test_gguf_set_kv(ggml_backend_dev_t dev, const unsigned int seed) { + ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr); + printf("%s: device=%s, backend=%s\n", __func__, ggml_backend_dev_description(dev), ggml_backend_name(backend)); + + int npass = 0; + int ntest = 0; + + struct gguf_context * gguf_ctx_0; + struct ggml_context * ctx_0; + ggml_backend_buffer_t bbuf_0; + { + struct random_gguf_context_result result = get_random_gguf_context(backend, seed); + gguf_ctx_0 = result.gguf_ctx; + ctx_0 = result.ctx; + bbuf_0 = result.buffer; + } + + struct gguf_context * gguf_ctx_1; + struct ggml_context * ctx_1; + ggml_backend_buffer_t bbuf_1; + { + struct random_gguf_context_result result = get_random_gguf_context(backend, seed + 1); + gguf_ctx_1 = result.gguf_ctx; + ctx_1 = result.ctx; + bbuf_1 = result.buffer; + } + + struct gguf_context * gguf_ctx_2 = gguf_init_empty(); + + gguf_set_kv(gguf_ctx_1, gguf_ctx_0); + gguf_set_kv(gguf_ctx_2, gguf_ctx_0); + + printf("%s: same_n_kv: ", __func__); + if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_2)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_kv_0_in_1: ", __func__); + if (all_kv_in_other(gguf_ctx_0, gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s:
all_kv_0_in_2: ", __func__); + if (all_kv_in_other(gguf_ctx_0, gguf_ctx_2)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + gguf_set_kv(gguf_ctx_0, gguf_ctx_1); + + printf("%s: same_n_kv_after_double_copy: ", __func__); + if (gguf_get_n_kv(gguf_ctx_0) == gguf_get_n_kv(gguf_ctx_1)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + printf("%s: all_kv_1_in_0_after_double_copy: ", __func__); + if (all_kv_in_other(gguf_ctx_1, gguf_ctx_0)) { + printf("\033[1;32mOK\033[0m\n"); + npass++; + } else { + printf("\033[1;31mFAIL\033[0m\n"); + } + ntest++; + + ggml_backend_buffer_free(bbuf_0); + ggml_backend_buffer_free(bbuf_1); + ggml_free(ctx_0); + ggml_free(ctx_1); + gguf_free(gguf_ctx_0); + gguf_free(gguf_ctx_1); + gguf_free(gguf_ctx_2); + ggml_backend_free(backend); + + printf("\n"); + return std::make_pair(npass, ntest); +} + +static void print_usage() { + printf("usage: test-gguf [seed]\n"); + printf(" if no seed is specified then a random seed is used\n"); +} + +int main(int argc, char ** argv) { + if (argc > 2) { + print_usage(); + return 1; + } + + std::random_device rd; + const unsigned int seed = argc < 2 ? rd() : std::stoi(argv[1]); + + // Initialize ggml backends early so the prints aren't interleaved with the test results: + ggml_backend_dev_count(); + fprintf(stderr, "\n"); + + int npass = 0; + int ntest = 0; + { + std::pair<int, int> result = test_handcrafted_file(seed); + npass += result.first; + ntest += result.second; + } + + for (size_t i = 0; i < ggml_backend_dev_count(); ++i) { + ggml_backend_dev_t dev = ggml_backend_dev_get(i); + + for (bool only_meta : {true, false}) { + std::pair<int, int> result = test_roundtrip(dev, seed, only_meta); + npass += result.first; + ntest += result.second; + } + + { + std::pair<int, int> result = test_gguf_set_kv(dev, seed); + npass += result.first; + ntest += result.second; + } + } + + printf("%d/%d tests passed\n", npass, ntest); + if (npass != ntest) { + printf("\033[1;31mFAIL\033[0m\n"); + return 1; + } + printf("\033[1;32mOK\033[0m\n"); + return 0; +}
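
Note on the round-trip pattern used by the new test (this note and the sketch below are commentary, not part of the patch): with the internals exposed in ggml-impl.h, a gguf_context can be serialized with gguf_write_to_buf and re-parsed with gguf_init_from_file_impl entirely through a temporary file, which is exactly what test_roundtrip does above with random contents. A minimal sketch of that flow, assuming it is compiled and linked like tests/test-gguf.cpp; example_roundtrip and the "example.key" KV pair are illustrative names, not identifiers from the patch:

#include "ggml.h"
#include "../ggml/src/ggml-impl.h"
#include <cstdio>

// Minimal sketch: write a gguf_context into an in-memory buffer, dump the
// buffer to a tmpfile, and parse it back through the exposed internals.
static bool example_roundtrip(void) {
    struct gguf_context * ctx_out = gguf_init_empty();
    gguf_set_val_u32(ctx_out, "example.key", 42); // illustrative KV pair

    struct gguf_buf buf = gguf_buf_init(16 * 1024);
    gguf_write_to_buf(ctx_out, &buf, /*only_meta =*/ true);

    FILE * file = tmpfile(); // may fail without elevated privileges on Windows
    fwrite(buf.data, 1, buf.offset, file);
    rewind(file);

    struct gguf_init_params params = {
        /*no_alloc =*/ true,
        /*ctx      =*/ nullptr,
    };
    struct gguf_context * ctx_in = gguf_init_from_file_impl(file, params);
    const bool ok = ctx_in != nullptr && gguf_get_n_kv(ctx_in) == 1;

    fclose(file);
    gguf_free(ctx_in); // gguf_free(NULL) is a no-op
    gguf_free(ctx_out);
    gguf_buf_free(buf);
    return ok;
}

This is the same write-buffer/read-file loop as test_roundtrip, minus the random KV pairs, tensors, and backend buffers; note that gguf_init_from_file_impl deliberately leaves the FILE * open so the caller (here, the test) keeps ownership of it.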