Skip to content

Commit

Permalink
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
common: llama_load_model_from_url windows set CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA
Browse files Browse the repository at this point in the history
phymbert committed Mar 17, 2024
1 parent 9ca4acc commit c1b002e
Showing 5 changed files with 23 additions and 2 deletions.
6 changes: 5 additions & 1 deletion .github/workflows/server.yml
Original file line number Diff line number Diff line change
@@ -131,11 +131,15 @@ jobs:
run: |
pip install -r examples/server/tests/requirements.txt
- name: Copy Libcurl
id: prepare_libcurl
run: |
cp $env:RUNNER_TEMP/libcurl/bin/libcurl-x64.dll ./build/bin/Release/libcurl-x64.dll
- name: Tests
id: server_integration_tests
if: ${{ !matrix.disabled_on_pr || !github.event.pull_request }}
run: |
cp $env:RUNNER_TEMP/libcurl/bin/libcurl-x64.dll ./build/bin/Release/libcurl.dll
cd examples/server/tests
behave.exe --summary --stop --no-capture --exclude 'issues|wrong_usages|passkey' --tags llama.cpp
5 changes: 5 additions & 0 deletions common/common.cpp
Original file line number Diff line number Diff line change
@@ -1660,6 +1660,11 @@ struct llama_model * llama_load_model_from_url(const char * model_url, const cha
// Set the URL, allow to follow http redirection
curl_easy_setopt(curl, CURLOPT_URL, model_url);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
#if defined(_WIN32)
// CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
// operating system. Currently implemented under MS-Windows.
curl_easy_setopt(curl, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif

// Check if the file already exists locally
struct stat model_file_info;
2 changes: 1 addition & 1 deletion examples/server/tests/features/embeddings.feature
Original file line number Diff line number Diff line change
@@ -5,7 +5,7 @@ Feature: llama.cpp server
Background: Server startup
Given a server listening on localhost:8080
And a model url https://huggingface.co/ggml-org/models/resolve/main/bert-bge-small/ggml-model-f16.gguf
And a model file /tmp/ggml-model-f16.gguf
And a model file ggml-model-f16.gguf
And a model alias bert-bge-small
And 42 as server seed
And 2 slots
10 changes: 10 additions & 0 deletions examples/server/tests/features/environment.py
Original file line number Diff line number Diff line change
@@ -33,6 +33,16 @@ def after_scenario(context, scenario):
print("\x1b[33;101mERROR: Server stopped listening\x1b[0m\n")

if not pid_exists(context.server_process.pid):
print("Trying to find server logs:")
out, err = context.server_process.communicate()
if out:
print("Server stdout:\n")
print(out)
print("\n")
if err:
print("Server stderr:\n")
print(err)
print("\n")
assert False, f"Server not running pid={context.server_process.pid} ..."

server_graceful_shutdown(context)
2 changes: 2 additions & 0 deletions examples/server/tests/features/steps/steps.py
Original file line number Diff line number Diff line change
@@ -1094,6 +1094,8 @@ def start_server_background(context):

pkwargs = {
'creationflags': flags,
'stderr': subprocess.PIPE,
'stdout': subprocess.PIPE
}
context.server_process = subprocess.Popen(
[str(arg) for arg in [context.server_path, *server_args]],

0 comments on commit c1b002e

Please sign in to comment.