common: llama_load_model_from_url windows set CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA

Pierrick HYMBERT 2024-03-17 09:35:19 +01:00
parent 9ca4acc5fb
commit c1b002e067
5 changed files with 23 additions and 2 deletions


@@ -131,11 +131,15 @@ jobs:
run: |
pip install -r examples/server/tests/requirements.txt
- name: Copy Libcurl
id: prepare_libcurl
run: |
cp $env:RUNNER_TEMP/libcurl/bin/libcurl-x64.dll ./build/bin/Release/libcurl-x64.dll
- name: Tests
id: server_integration_tests
if: ${{ !matrix.disabled_on_pr || !github.event.pull_request }}
run: |
cp $env:RUNNER_TEMP/libcurl/bin/libcurl-x64.dll ./build/bin/Release/libcurl.dll
cd examples/server/tests
behave.exe --summary --stop --no-capture --exclude 'issues|wrong_usages|passkey' --tags llama.cpp


@@ -1660,6 +1660,11 @@ struct llama_model * llama_load_model_from_url(const char * model_url, const cha
// Set the URL, allow to follow http redirection
curl_easy_setopt(curl, CURLOPT_URL, model_url);
curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
#if defined(_WIN32)
// CURLSSLOPT_NATIVE_CA tells libcurl to use standard certificate store of
// operating system. Currently implemented under MS-Windows.
curl_easy_setopt(curl, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif
// Check if the file already exists locally
struct stat model_file_info;
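
The core of the commit is the `#if defined(_WIN32)` block above: on Windows, a libcurl built against a TLS backend such as OpenSSL has no default CA bundle, so HTTPS downloads fail certificate verification unless libcurl is pointed at the operating system's certificate store. Below is a minimal sketch of the same pattern in isolation; the helper name fetch_model_file and the bare-bones error handling are illustrative assumptions, not code from the repository.

// Minimal sketch (not the repository's code): download a file over HTTPS with
// libcurl, enabling the OS-native certificate store on Windows the same way
// this commit does. fetch_model_file is a hypothetical helper name.
#include <cstdio>
#include <curl/curl.h>

static bool fetch_model_file(const char * url, const char * path) {
    CURL * curl = curl_easy_init();
    if (!curl) {
        return false;
    }
    FILE * out = fopen(path, "wb");
    if (!out) {
        curl_easy_cleanup(curl);
        return false;
    }

    // Same options as llama_load_model_from_url: set the URL, follow redirects
    curl_easy_setopt(curl, CURLOPT_URL, url);
    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
    // With no custom write callback, libcurl writes the response body to this FILE *
    curl_easy_setopt(curl, CURLOPT_WRITEDATA, out);
#if defined(_WIN32)
    // On Windows, libcurl builds using a CA-bundle-based TLS backend have no
    // trusted roots by default; CURLSSLOPT_NATIVE_CA makes verification use
    // the Windows certificate store instead.
    curl_easy_setopt(curl, CURLOPT_SSL_OPTIONS, CURLSSLOPT_NATIVE_CA);
#endif

    const CURLcode res = curl_easy_perform(curl);

    fclose(out);
    curl_easy_cleanup(curl);
    return res == CURLE_OK;
}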


@@ -5,7 +5,7 @@ Feature: llama.cpp server
Background: Server startup
Given a server listening on localhost:8080
And a model url https://huggingface.co/ggml-org/models/resolve/main/bert-bge-small/ggml-model-f16.gguf
And a model file /tmp/ggml-model-f16.gguf
And a model file ggml-model-f16.gguf
And a model alias bert-bge-small
And 42 as server seed
And 2 slots
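
The Background change above drops the hard-coded /tmp prefix, so the test server downloads the model into its working directory instead. For reference, a hedged sketch of calling the download entry point directly; the trailing parameters (destination path and default model params) and the common.h declaration are assumed from context, since the signature line in the C++ hunk is truncated.

// Hedged usage sketch: the exact parameter list after model_url is assumed,
// not confirmed by this diff.
#include <cstdio>
#include "common.h"  // assumed to declare llama_load_model_from_url

int main() {
    struct llama_model_params params = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_url(
        "https://huggingface.co/ggml-org/models/resolve/main/bert-bge-small/ggml-model-f16.gguf",
        "ggml-model-f16.gguf",  // downloaded into the current working directory
        params);
    if (model == NULL) {
        fprintf(stderr, "download or load failed\n");
        return 1;
    }
    llama_free_model(model);
    return 0;
}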


@@ -33,6 +33,16 @@ def after_scenario(context, scenario):
print("\x1b[33;101mERROR: Server stopped listening\x1b[0m\n")
if not pid_exists(context.server_process.pid):
print("Trying to find server logs:")
out, err = context.server_process.communicate()
if out:
print("Server stdout:\n")
print(out)
print("\n")
if err:
print("Server stderr:\n")
print(err)
print("\n")
assert False, f"Server not running pid={context.server_process.pid} ..."
server_graceful_shutdown(context)


@@ -1094,6 +1094,8 @@ def start_server_background(context):
pkwargs = {
'creationflags': flags,
'stderr': subprocess.PIPE,
'stdout': subprocess.PIPE
}
context.server_process = subprocess.Popen(
[str(arg) for arg in [context.server_path, *server_args]],