server: tests: add split tests, and HF options params

Pierrick HYMBERT 2024-03-23 12:53:30 +01:00
parent 3ba5f2d124
commit b4a2ed8585
3 changed files with 13 additions and 7 deletions


@@ -4,7 +4,8 @@ Feature: Parallel
   Background: Server startup
     Given a server listening on localhost:8080
-    And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
+    And a model file tinyllamas/split/stories15M-00001-of-00003.gguf from HF repo ggml-org/models
+    And a model file test-model-00001-of-00003.gguf
     And 42 as server seed
     And 128 as batch size
     And 256 KV cache size
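The new background exercises a split model; the shard names follow the zero-padded "-NNNNN-of-MMMMM.gguf" pattern visible above. A minimal sketch of that naming (the helper is hypothetical, only here to illustrate the convention the test files rely on):

def split_shard_name(prefix: str, index: int, total: int) -> str:
    # Hypothetical helper: reproduces the zero-padded shard naming seen in
    # tinyllamas/split/stories15M-00001-of-00003.gguf; not part of this commit.
    return f"{prefix}-{index:05d}-of-{total:05d}.gguf"

# split_shard_name("stories15M", 1, 3) -> "stories15M-00001-of-00003.gguf"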


@@ -4,8 +4,8 @@ Feature: llama.cpp server
   Background: Server startup
     Given a server listening on localhost:8080
-    And a model url https://huggingface.co/ggml-org/models/resolve/main/tinyllamas/stories260K.gguf
-    And a model file stories260K.gguf
+    And a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models
+    And a model file test-model.gguf
     And a model alias tinyllama-2
     And 42 as server seed
     # KV Cache corresponds to the total amount of tokens


@@ -16,7 +16,6 @@ import numpy as np
 import openai
 from behave import step
 from behave.api.async_step import async_run_until_complete
-from huggingface_hub import hf_hub_download
 from prometheus_client import parser
@@ -39,6 +38,8 @@ def step_server_config(context, server_fqdn, server_port):
     context.model_alias = None
     context.model_file = None
+    context.model_hf_repo = None
+    context.model_hf_file = None
     context.model_url = None
     context.n_batch = None
     context.n_ubatch = None
@@ -68,9 +69,9 @@ def step_server_config(context, server_fqdn, server_port):
 @step('a model file {hf_file} from HF repo {hf_repo}')
 def step_download_hf_model(context, hf_file, hf_repo):
-    context.model_file = hf_hub_download(repo_id=hf_repo, filename=hf_file)
-    if context.debug:
-        print(f"model file: {context.model_file}")
+    context.model_hf_repo = hf_repo
+    context.model_hf_file = hf_file
+    context.model_file = os.path.basename(hf_file)


 @step('a model file {model_file}')
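For reference, the revised step no longer downloads anything itself; it only records the repo/file pair and derives a local file name. A quick sketch of what ends up in the behave context for the server.feature background above (SimpleNamespace stands in for behave's context object; the values come from the feature file):

import os
from types import SimpleNamespace

# Feature line: "a model file tinyllamas/stories260K.gguf from HF repo ggml-org/models"
context = SimpleNamespace()
context.model_hf_repo = 'ggml-org/models'
context.model_hf_file = 'tinyllamas/stories260K.gguf'
context.model_file = os.path.basename(context.model_hf_file)  # 'stories260K.gguf'

The following "a model file test-model.gguf" step then presumably replaces context.model_file with the name the test expects the download to land under.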
@@ -1079,6 +1080,10 @@ def start_server_background(context):
         server_args.extend(['--model', context.model_file])
     if context.model_url:
         server_args.extend(['--model-url', context.model_url])
+    if context.model_hf_repo:
+        server_args.extend(['--hf-repo', context.model_hf_repo])
+    if context.model_hf_file:
+        server_args.extend(['--hf-file', context.model_hf_file])
     if context.n_batch:
         server_args.extend(['--batch-size', context.n_batch])
     if context.n_ubatch:
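Taken together, the harness now forwards the HF coordinates to the server instead of pre-fetching the model with hf_hub_download, so the server can download the file itself at startup. A rough sketch of the extra arguments produced for the server.feature background above (values taken from the feature file; the rest of the command line is elided):

# Assembled by start_server_background() for the updated server.feature background:
extra_args = [
    '--model', 'test-model.gguf',                # local file name from "a model file test-model.gguf"
    '--hf-repo', 'ggml-org/models',              # from "... from HF repo ggml-org/models"
    '--hf-file', 'tinyllamas/stories260K.gguf',
]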