fix tests

Author: Xuan Son Nguyen, 2024-10-08 11:48:35 +02:00
parent 468551e7a6
commit 3bea2e6a86
4 changed files with 15 additions and 14 deletions

examples/server/server.cpp

@@ -3249,7 +3249,7 @@ int main(int argc, char ** argv) {
     if (!params.api_keys.empty()) {
         // for now, if API key is set, web UI is unusable
-        svr->Get("/", [&](const httplib::Request & req, httplib::Response & res) {
+        svr->Get("/", [&](const httplib::Request &, httplib::Response & res) {
             return res.set_content("Web UI is disabled because API key is set.", "text/html; charset=utf-8");
         });
     } else {
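Note: the only change in this hunk is dropping the unused `req` parameter name, which silences -Wunused-parameter (fatal under -Werror) in the API-key branch. A minimal standalone sketch of the same pattern, assuming cpp-httplib as the server already uses (host and port are illustrative):

    #include "httplib.h"

    int main() {
        httplib::Server svr;
        // The handler never reads the request, so the parameter is left
        // unnamed; this keeps -Wunused-parameter quiet.
        svr.Get("/", [](const httplib::Request &, httplib::Response & res) {
            res.set_content("Web UI is disabled because API key is set.",
                            "text/html; charset=utf-8");
        });
        svr.listen("127.0.0.1", 8080);
    }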

examples/server/tests/features/security.feature

@@ -5,7 +5,7 @@ Feature: Security
   Background: Server startup with an api key defined
     Given a server listening on localhost:8080
     And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
-    And a server api key llama.cpp
+    And a server api key THIS_IS_THE_KEY
     Then the server is starting
     Then the server is healthy
@@ -16,11 +16,11 @@ Feature: Security
     And a completion request with <api_error> api error

     Examples: Prompts
       | api_key | api_error |
-      | llama.cpp | no |
-      | llama.cpp | no |
+      | THIS_IS_THE_KEY | no |
+      | THIS_IS_THE_KEY | no |
       | hackeme | raised |
       | | raised |

   Scenario Outline: OAI Compatibility
     Given a system prompt test
@@ -32,10 +32,10 @@ Feature: Security
     Given an OAI compatible chat completions request with <api_error> api error

     Examples: Prompts
       | api_key | api_error |
-      | llama.cpp | no |
-      | llama.cpp | no |
+      | THIS_IS_THE_KEY | no |
+      | THIS_IS_THE_KEY | no |
       | hackme | raised |

   Scenario Outline: OAI Compatibility (invalid response formats)
     Given a system prompt test
@@ -55,7 +55,7 @@ Feature: Security
   Scenario Outline: CORS Options
-    Given a user api key llama.cpp
+    Given a user api key THIS_IS_THE_KEY
     When an OPTIONS request is sent from <origin>
     Then CORS header <cors_header> is set to <cors_header_value>
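Note: replacing the literal key llama.cpp with THIS_IS_THE_KEY avoids confusion between the project name and the credential in these tables. For reference, the CORS scenario boils down to a preflight like the sketch below, assuming cpp-httplib on the client side as well; the origin value is made up:

    #include "httplib.h"
    #include <iostream>

    int main() {
        httplib::Client cli("localhost", 8080);
        // A browser preflight sends OPTIONS with an Origin header...
        httplib::Headers headers = {{"Origin", "http://localhost:3000"}};
        if (auto res = cli.Options("/", headers)) {
            // ...and the scenario asserts on the CORS header echoed back.
            std::cout << res->get_header_value("Access-Control-Allow-Origin") << "\n";
        }
    }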

examples/server/tests/features/steps/steps.py

@@ -1299,7 +1299,8 @@ async def wait_for_slots_status(context,
     async with aiohttp.ClientSession(timeout=DEFAULT_TIMEOUT_SECONDS) as session:
         while True:
-            async with await session.get(f'{base_url}/slots', params=params) as slots_response:
+            headers = {'Authorization': f'Bearer {context.server_api_key}'}
+            async with await session.get(f'{base_url}/slots', params=params, headers=headers) as slots_response:
                 status_code = slots_response.status
                 slots = await slots_response.json()
                 if context.debug:
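Note: wait_for_slots_status used to poll /slots without credentials; once the Background starts the server with an API key, that poll is presumably rejected, which is what broke these tests. The request the helper now sends is equivalent to the following sketch (C++ with cpp-httplib, for comparison; the key value comes from the feature file):

    #include "httplib.h"
    #include <iostream>
    #include <string>

    int main() {
        httplib::Client cli("localhost", 8080);
        const std::string server_api_key = "THIS_IS_THE_KEY";  // set by the Background step
        httplib::Headers headers = {{"Authorization", "Bearer " + server_api_key}};
        if (auto res = cli.Get("/slots", headers)) {
            std::cout << res->status << "\n";  // expect 200 with the key, 401 without
        }
    }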

examples/server/utils.hpp

@@ -90,7 +90,7 @@ inline std::string format_chat(const struct llama_model * model, const std::stri
     return formatted_chat;
 }

-std::string llama_get_chat_template(const struct llama_model * model) {
+static std::string llama_get_chat_template(const struct llama_model * model) {
     std::string template_key = "tokenizer.chat_template";
     // call with NULL buffer to get the total size of the string
     int32_t res = llama_model_meta_val_str(model, template_key.c_str(), NULL, 0);
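Note: marking llama_get_chat_template as static gives it internal linkage, so utils.hpp can be included from more than one translation unit without duplicate-symbol link errors; that is likely the build fix here. For context, the body uses the usual size-then-fill idiom of llama_model_meta_val_str, sketched in isolation below (the helper name get_meta_str is mine):

    #include "llama.h"
    #include <string>
    #include <vector>

    // First call with a NULL buffer yields the value's length (negative if
    // the key is missing); the second call fills the buffer for real.
    static std::string get_meta_str(const struct llama_model * model, const char * key) {
        int32_t len = llama_model_meta_val_str(model, key, NULL, 0);
        if (len < 0) {
            return "";  // key not present in the model metadata
        }
        std::vector<char> buf(len + 1, 0);  // +1 for the terminating NUL
        llama_model_meta_val_str(model, key, buf.data(), buf.size());
        return std::string(buf.data(), len);
    }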