llama : do not use KV cache for non-causal models

ggml-ci
Georgi Gerganov 2024-03-04 13:31:03 +02:00
parent d0347840c1
commit eb42596277
3 changed files with 109 additions and 39 deletions


@@ -13,7 +13,7 @@ async def main():
     model_url = "http://127.0.0.1:6900"
     responses: list[requests.Response] = await asyncio.gather(*[requests_post_async(
         url= f"{model_url}/embedding",
-        json= {"content": str(i)*32}
+        json= {"content": str(0)*32}
     ) for i in range(n)])
     for response in responses:
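
The hunk calls a requests_post_async helper that is not shown here and is presumably defined earlier in the script. A minimal sketch of such a helper, assuming it simply offloads the blocking requests.post call to a worker thread so the calls can be awaited concurrently with asyncio.gather:

import asyncio
import requests

# Hypothetical helper (not part of the diff above): run the blocking
# requests.post in a worker thread so many requests can be issued
# concurrently under asyncio.gather.
async def requests_post_async(*args, **kwargs) -> requests.Response:
    return await asyncio.to_thread(requests.post, *args, **kwargs)

With the payload changed from str(i)*32 to str(0)*32, every request now sends the same 32-character input, so the embeddings returned by the concurrently processed requests can be compared against each other for consistency.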