From ea5a7fbc9532f4ae3f4f55bda0078ab9ce64729e Mon Sep 17 00:00:00 2001
From: Elsa
Date: Tue, 25 Jul 2023 18:07:27 +0800
Subject: [PATCH 1/4] Use conversation template from fastchat for API proxy
 Fix eventsource format

---
 examples/server/api_like_OAI.py | 65 +++++++++++++++++++++++----------
 1 file changed, 45 insertions(+), 20 deletions(-)

diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index aa325a03e..3f166609c 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -1,14 +1,18 @@
 import argparse
 from flask import Flask, jsonify, request, Response
+from flask_cors import CORS
 import urllib.parse
 import requests
 import time
 import json
+from fastchat import conversation
 
 app = Flask(__name__)
+CORS(app)
 
 parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.")
+parser.add_argument("--chat-prompt-model", type=str, help="Set the model", default="")
 parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')
 parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ")
 parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ")
@@ -29,25 +33,46 @@ def is_present(json, key):
     return True
 
+use_conversation_template = args.chat_prompt_model != ""
+
+if use_conversation_template:
+    conv = conversation.get_conv_template(args.chat_prompt_model)
+    stop_token = conv.stop_str
+else:
+    stop_token = args.stop
+
 #convert chat to prompt
 def convert_chat(messages):
-    prompt = "" + args.chat_prompt.replace("\\n", "\n")
+    if use_conversation_template:
+        conv = conversation.get_conv_template(args.chat_prompt_model)
+        for line in messages:
+            if (line["role"] == "system"):
+                try:
+                    conv.set_system_msg(line["content"])
+                except Exception:
+                    pass
+            elif (line["role"] == "user"):
+                conv.append_message(conv.roles[0], line["content"])
+            elif (line["role"] == "assistant"):
+                conv.append_message(conv.roles[1], line["content"])
+        conv.append_message(conv.roles[1], None)
+        prompt = conv.get_prompt()
+    else:
+        prompt = "" + args.chat_prompt.replace("\\n", "\n")
+        system_n = args.system_name.replace("\\n", "\n")
+        user_n = args.user_name.replace("\\n", "\n")
+        ai_n = args.ai_name.replace("\\n", "\n")
+        stop = stop_token.replace("\\n", "\n")
 
-    system_n = args.system_name.replace("\\n", "\n")
-    user_n = args.user_name.replace("\\n", "\n")
-    ai_n = args.ai_name.replace("\\n", "\n")
-    stop = args.stop.replace("\\n", "\n")
-
-
-    for line in messages:
-        if (line["role"] == "system"):
-            prompt += f"{system_n}{line['content']}"
-        if (line["role"] == "user"):
-            prompt += f"{user_n}{line['content']}"
-        if (line["role"] == "assistant"):
-            prompt += f"{ai_n}{line['content']}{stop}"
-    prompt += ai_n.rstrip()
+        for line in messages:
+            if (line["role"] == "system"):
+                prompt += f"{system_n}{line['content']}"
+            if (line["role"] == "user"):
+                prompt += f"{user_n}{line['content']}"
+            if (line["role"] == "assistant"):
+                prompt += f"{ai_n}{line['content']}{stop}"
+        prompt += ai_n.rstrip()
 
     return prompt
 
@@ -69,8 +94,8 @@ def make_postData(body, chat=False, stream=False):
     if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"]
     if(is_present(body, "seed")): postData["seed"] = body["seed"]
     if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()]
-    if (args.stop != ""):
-        postData["stop"] = [args.stop]
+    if stop_token: # "" or None
+        postData["stop"] = [stop_token]
     else:
         postData["stop"] = []
     if(is_present(body, "stop")): postData["stop"] += body["stop"]
@@ -173,12 +198,12 @@ def chat_completions():
         data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True)
         time_now = int(time.time())
         resData = make_resData_stream({}, chat=True, time_now=time_now, start=True)
-        yield 'data: {}\n'.format(json.dumps(resData))
+        yield 'data: {}\n\n'.format(json.dumps(resData))
         for line in data.iter_lines():
             if line:
                 decoded_line = line.decode('utf-8')
                 resData = make_resData_stream(json.loads(decoded_line[6:]), chat=True, time_now=time_now)
-                yield 'data: {}\n'.format(json.dumps(resData))
+                yield 'data: {}\n\n'.format(json.dumps(resData))
     return Response(generate(), mimetype='text/event-stream')
@@ -212,7 +237,7 @@ def completion():
             if line:
                 decoded_line = line.decode('utf-8')
                 resData = make_resData_stream(json.loads(decoded_line[6:]), chat=False, time_now=time_now)
-                yield 'data: {}\n'.format(json.dumps(resData))
+                yield 'data: {}\n\n'.format(json.dumps(resData))
     return Response(generate(), mimetype='text/event-stream')
 
 if __name__ == '__main__':

From bee2a3d981c9e9fdfa5d5f47c2d38920e96d1607 Mon Sep 17 00:00:00 2001
From: Elsa
Date: Tue, 25 Jul 2023 18:16:31 +0800
Subject: [PATCH 2/4] Add docs
 Make fschat and flask-cors optional

---
 examples/server/README.md       | 10 ++++++++++
 examples/server/api_like_OAI.py | 17 +++++++++++------
 2 files changed, 21 insertions(+), 6 deletions(-)

diff --git a/examples/server/README.md b/examples/server/README.md
index e5ca8269b..051a75090 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -195,6 +195,14 @@ bash chat.sh
 API example using Python Flask: [api_like_OAI.py](api_like_OAI.py)
 This example must be used with server.cpp
+Requirements:
+
+```shell
+pip install flask flask-cors fschat # flask-cors and fschat are optional; flask-cors enables cross-origin requests, fschat provides chat template integration
+```
+
+Run the server:
+
 ```sh
 python api_like_OAI.py
 ```
@@ -204,6 +212,8 @@ After running the API server, you can use it in Python by setting the API base U
 openai.api_base = "http://<Your api-server IP>:port"
 ```
 
+For better integration with the model, it is recommended to use the `--chat-prompt-model` parameter when starting the proxy, rather than relying solely on parameters like `--user-name`. This parameter accepts model names registered in [FastChat/conversation.py](https://github.com/lm-sys/FastChat/blob/main/fastchat/conversation.py), for example `llama-2`.
+
 Then you can utilize llama.cpp as an OpenAI's **chat.completion** or **text_completion** API
 
 ### Extending or building alternative Web Front End

diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index 3f166609c..50f2750c1 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -1,18 +1,23 @@
 import argparse
 from flask import Flask, jsonify, request, Response
-from flask_cors import CORS
 import urllib.parse
 import requests
 import time
 import json
-from fastchat import conversation
-
+try:
+    from fastchat import conversation
+except ImportError:
+    conversation = None
 app = Flask(__name__)
-CORS(app)
+try:
+    from flask_cors import CORS
+    CORS(app)
+except ImportError:
+    pass
 
 parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.")
-parser.add_argument("--chat-prompt-model", type=str, help="Set the model", default="")
+parser.add_argument("--chat-prompt-model", type=str, help="Set the model name of the conversation template", default="")
 parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')
 parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ")
 parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ")
@@ -33,7 +38,7 @@ def is_present(json, key):
     return True
 
-use_conversation_template = args.chat_prompt_model != ""
+use_conversation_template = args.chat_prompt_model != "" and conversation is not None
 
 if use_conversation_template:
     conv = conversation.get_conv_template(args.chat_prompt_model)

From 712c2e90b1f2919c9a1cbe6455ffce8cde795262 Mon Sep 17 00:00:00 2001
From: Elsa
Date: Wed, 2 Aug 2023 09:33:23 +0800
Subject: [PATCH 3/4] Use conv.set_system_message from upstream

---
 examples/server/api_like_OAI.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index 50f2750c1..179a57416 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -54,7 +54,7 @@ def convert_chat(messages):
         for line in messages:
             if (line["role"] == "system"):
                 try:
-                    conv.set_system_msg(line["content"])
+                    conv.set_system_message(line["content"])
                 except Exception:
                     pass
             elif (line["role"] == "user"):

From ea73dace986f05b6b35c799880c7eaea7ee578f4 Mon Sep 17 00:00:00 2001
From: Elsa
Date: Mon, 7 Aug 2023 18:09:15 +0800
Subject: [PATCH 4/4] Fix when `stop` in request is null

---
 examples/server/api_like_OAI.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/server/api_like_OAI.py b/examples/server/api_like_OAI.py
index 179a57416..4049e1f4a 100755
--- a/examples/server/api_like_OAI.py
+++ b/examples/server/api_like_OAI.py
@@ -103,7 +103,7 @@ def make_postData(body, chat=False, stream=False):
         postData["stop"] = [stop_token]
     else:
         postData["stop"] = []
-    if(is_present(body, "stop")): postData["stop"] += body["stop"]
+    if(is_present(body, "stop")): postData["stop"] += body["stop"] or []
     postData["n_keep"] = -1
     postData["stream"] = stream
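
Taken together, patches 1-3 replace the hand-rolled `USER:`/`ASSISTANT:` prompt with FastChat's registered conversation templates. A minimal sketch of what the proxy now does internally when `--chat-prompt-model` is set; it only uses calls that appear in the patches above (assumes the optional `fschat` package is installed, and the system message shown is just an example):

```python
# Sketch of the proxy's template path when --chat-prompt-model llama-2 is set.
# "llama-2" is one of the template names registered in FastChat/conversation.py.
from fastchat import conversation

conv = conversation.get_conv_template("llama-2")
conv.set_system_message("You are a helpful assistant.")  # from a "system" message
conv.append_message(conv.roles[0], "Hello!")  # a "user" turn
conv.append_message(conv.roles[1], None)      # open assistant slot the model completes

print(conv.get_prompt())  # the raw prompt string forwarded to server.cpp
print(conv.stop_str)      # template stop string (may be None); fills postData["stop"]
```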
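The `data: {}\n\n` changes fix the event-stream framing: the SSE format requires each event to be terminated by a blank line, so with a single `\n` many clients never flush an event. A minimal streaming client sketch against the proxy; the port `8081` (the script's default) and the `/chat/completions` route are taken from `api_like_OAI.py`, so adjust them for your setup:

```python
# Streams a chat completion from the proxy and prints tokens as they arrive.
# Assumes the proxy is running locally on its default port 8081.
import json
import requests

resp = requests.post(
    "http://127.0.0.1:8081/chat/completions",
    json={"messages": [{"role": "user", "content": "Hello!"}], "stream": True},
    stream=True,
)

for line in resp.iter_lines():
    # iter_lines() drops the blank separator between SSE events, so each
    # non-empty line is one 'data: {...}' event from the proxy.
    if line:
        chunk = json.loads(line.decode("utf-8")[len("data: "):])
        delta = chunk["choices"][0]["delta"]
        print(delta.get("content", ""), end="", flush=True)
print()
```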