From 80bec9890a57bc53d28c22669dbe9a6eed8ae1b9 Mon Sep 17 00:00:00 2001 From: Pierrick HYMBERT Date: Sat, 16 Mar 2024 14:08:21 +0100 Subject: [PATCH] llama_load_model_from_url: try to make the Windows build pass --- common/common.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 1f57493dfda35..fc315e2fb4dc5 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1394,7 +1394,6 @@ struct llama_model * llama_load_model_from_url(const char * model_url, const cha curl_global_init(CURL_GLOBAL_DEFAULT); auto curl = curl_easy_init(); - if (!curl) { curl_global_cleanup(); fprintf(stderr, "%s: error initializing lib curl\n", __func__); @@ -1445,11 +1444,13 @@ struct llama_model * llama_load_model_from_url(const char * model_url, const cha return llama_load_model_from_file(path_model, params); } #else + struct llama_model *llama_load_model_from_url(const char * /*model_url*/, const char * /*path_model*/, struct llama_model_params /*params*/) { + fprintf(stderr, "%s: llama.cpp built without curl support, downloading from an url not supported.\n", __func__); return nullptr; } + #endif std::tuple llama_init_from_gpt_params(gpt_params & params) {