From 486833214f41690a76c3dd382a7d75a50f3f3f50 Mon Sep 17 00:00:00 2001
From: supermy
Date: Wed, 29 Nov 2023 22:49:35 +0800
Subject: [PATCH] Test optimization
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 Makefile                        |   2 +
 Makefile-finetune.mak           | 162 +++++++++++++++++++
 data/belle_chat_ramdon_10k.json |  10 ++
 examples/alpaca.sh              |   2 +-
 examples/finetune/README.md     |  20 +++
 examples/finetune/finetune.sh   |  60 +++++--
 examples/llama2.sh              |  17 +-
 prompts/chat-with-bob.txt       |   1 +
 8 files changed, 253 insertions(+), 21 deletions(-)
 create mode 100644 Makefile-finetune.mak
 create mode 100644 data/belle_chat_ramdon_10k.json

diff --git a/Makefile b/Makefile
index 517a8c316..6dfb50be6 100644
--- a/Makefile
+++ b/Makefile
@@ -717,3 +717,5 @@ tests/test-tokenizer-1-llama: tests/test-tokenizer-1-llama.cpp build-info.h ggml
 tests/test-c.o: tests/test-c.c llama.h
 	$(CC) $(CFLAGS) -c $(filter-out %.h,$^) -o $@
+
+include Makefile-finetune.mak
diff --git a/Makefile-finetune.mak b/Makefile-finetune.mak
new file mode 100644
index 000000000..8639d493b
--- /dev/null
+++ b/Makefile-finetune.mak
@@ -0,0 +1,162 @@
+# Fine-tuning runs on both macOS and Windows;
+# different models can be fine-tuned;
+# different datasets can be used.
+
+# The finetune binary is used by default; Baichuan uses finetune-bc
+adamcnt = 30
+finetune = finetune
+model_dir = ../models/ggmls
+data_dir = ../models
+dataset = slqm
+prompt='"人间清暑殿,天上广寒宫。"的下一句'
+
+finetunel4lora:
+	echo "start model finetune ......"
+	./${finetune} \
+	--train-data ${data_dir}/${dataset}.txt \
+	--model-base ${model_dir}/${model_name}.gguf \
+	--checkpoint-in ${model_dir}/chk/chk-${dataset}-${model_name}-LATEST.gguf \
+	--checkpoint-out ${model_dir}/chk/chk-${dataset}-${model_name}-ITERATION.gguf \
+	--lora-out ${model_dir}/lora/lora-${dataset}-${model_name}-ITERATION.bin \
+	--threads 4 --ctx 64 --batch 4 --adam-iter ${adamcnt} --save-every 5 \
+	--lora-r 8 --lora-alpha 16 --adam-alpha 3e-4 \
+	--epochs 3 \
+	--grad-acc 1 \
+	--adam-epsilon 1e-8 \
+	--use-checkpointing
+	# --escape \
+	# --seed 1
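+
+# Example run (hypothetical model/dataset names; assumes the ${model_dir}/chk
+# and ${model_dir}/lora directories already exist):
+#   make -f Makefile-finetune.mak finetunel4lora model_name="llama-2-7b.Q2_K" dataset="slqm" adamcnt=30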
+
+
+# Model: Baichuan 2
+# Parameters: 7B, 13B
+# Training tokens: 2.6 trillion
+# Tokenizer: BPE
+# Vocabulary size: 125696
+# Positional encoding: 7B: RoPE; 13B: ALiBi (little practical impact)
+# Max context length: 4096
+# Activation function: SwiGLU
+# Normalization: Layer Normalization + RMSNorm
+# Attention: xFormers2
+# Optimizer: AdamW + NormHead + Max-z loss
+
+# finetune
+# make -f Makefile-finetune.mak finetune-bc2 adamcnt=90 dataset="slqm"
+finetune-bc2:
+	echo 'baichuan2 finetune'
+	make finetunel4lora \
+	-f Makefile-finetune.mak \
+	finetune="finetune-bc" \
+	model_name="bc2-13b-chat-q2_k"
+
+# make -f Makefile-finetune.mak finetune-llama2 adamcnt=90 dataset="slqm"
+finetune-llama2:
+	echo 'llama-2 finetune'
+	make finetunel4lora \
+	-f Makefile-finetune.mak \
+	model_name="chinese-llama-2-7b-16k.Q2_K"
+
+# macos:   make -f Makefile-finetune.mak finetune-mistral adamcnt=90 dataset="slqm"
+# windows: make -f Makefile-finetune.mak finetune-mistral adamcnt=90 dataset="slqm" data_dir="../" model_dir="../"
+finetune-mistral:
+	echo 'mistral finetune'
+	make finetunel4lora \
+	-f Makefile-finetune.mak \
+	model_name="openbuddy-mistral-7b-v13.1-q2_k"
+
+
+# macos:   make -f Makefile-finetune.mak finetune-all adamcnt=1 dataset="slqm"
+# windows: make -f Makefile-finetune.mak finetune-all adamcnt=1 dataset="slqm" data_dir="../" model_dir="../"
+finetune-all: finetune-bc2 finetune-llama2 finetune-mistral
+
+
+%${model_name}.gguf:
+	echo "merge"
+	./export-lora \
+	--model-base ${model_dir}/${model_name}.gguf \
+	--lora-scaled ${model_dir}/lora/lora-${dataset}-${model_name}-LATEST.bin 1.0 \
+	--model-out ${model_dir}/${dataset}-${model_name}.gguf
+
+# -ins              start a ChatGPT-style interactive chat mode
+# -f                load a prompt template; for alpaca models load the prompts/alpaca.txt instruction template
+# -c                context length; larger values let the model draw on a longer dialog history (default: 512)
+# -n                maximum length of the generated reply (default: 128)
+# --repeat_penalty  strength of the penalty applied to repeated text in the reply
+# --temp            temperature; lower values make replies less random, higher values more random
+# --top_p, top_k    parameters controlling sampling during decoding
+# -b                batch size (default: 512)
+# -t                number of threads (default: 8); can be increased moderately
+inter: ${model_dir}/${dataset}-${model_name}.gguf
+	echo "inter"
+	./main \
+	-m ${model_dir}/${dataset}-${model_name}.gguf \
+	-n 512 \
+	-p ${prompt}
+
+# model_dir="../models/ggmls" data_dir="../models/"
+# make -f Makefile-finetune.mak inter-bc
+inter-bc:
+	echo 'baichuan inter'
+	make inter \
+	-f Makefile-finetune.mak \
+	finetune="finetune-bc" \
+	model_name="bc2-13b-chat-q2_k" \
+	prompt=${prompt}
+
+llm4sql:
+	./main -m ../models/ggmls/openbuddy-mistral-7b-v13.1-q4_0.gguf -n 512 -p "展示上个季度所有销售额超过 10000 美元的订单,写出对应的SQL语句" -t 2 -ngl 4
+	./main -m ../models/ggmls/zephyr-7b-beta-q5_0.gguf -n 512 -p "展示上个季度所有销售额超过 10000 美元的订单,写出对应的SQL语句" -t 2 -ngl 4
+	./main -m ../models/ggmls/slqm-bc2-13b-chat-q2_k.gguf -n 512 -p "展示上个季度所有销售额超过 10000 美元的订单,写出对应的SQL语句" -t 2 -ngl 4
+
+chat:
+	./main -m ../models/ggmls/slqm-bc2-13b-chat-q2_k.gguf \
+	--color \
+	--ctx_size 2048 \
+	-n -1 \
+	-ins -b 256 \
+	--top_k 10000 \
+	--temp 0.2 \
+	--repeat_penalty 1.1 \
+	-t 2
+
+chat1:
+	./main -t 4 -ngl 40 \
+	-m ../models/ggmls/chinese-llama-2-7b-16k.Q2_K.gguf --color -c 4096 --temp 0.7 --repeat_penalty 1.1 -n -1 \
+	-p "[INST] <<SYS>>\nYou are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature. If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\n<</SYS>>\n\nWrite a story about llamas [/INST]"
+
+# Interactive dialog: history length -c 2048, reply length -n 256; -ins starts a ChatGPT-style chat mode
+chat2:
+	./main -m ../models/ggmls/zephyr-7b-beta-q5_0.gguf -c 2048 -n 256 --repeat_penalty 1.3 --temp 0.2 --color -ins -f prompts/alpaca.txt
+
+# chat with bob
+chat3:
+	./main -m ../models/ggmls/zephyr-7b-beta-q5_0.gguf -n 256 --repeat_penalty 1.0 --color -i -r "User:" -f prompts/chat-with-bob.txt
+
+
+
+# Measured by perplexity (PPL, one of the most common metrics for evaluating language models), q8_0 is nearly indistinguishable from FP16, yet the model is much smaller and generation is much faster. Quantized 13B, 30B and 65B models follow the same pattern.
+perp:
+	./perplexity -m ../models/ggmls/openbuddy-mistral-7b-v13.1-q4_0.gguf -f ../models/hlm.txt -c 4096 -ngl 1
+
+server-api:
+	./server --host 0.0.0.0 -m ../models/ggmls/openbuddy-mistral-7b-v13.1-q4_0.gguf -c 4096 -ngl 1
+
+
+SYSTEM_PROMPT='You are a helpful assistant. 你是一个乐于助人的助手。'
+# SYSTEM_PROMPT='You are a helpful assistant. 你是一个乐于助人的助手。请你提供专业、有逻辑、内容真实、有价值的详细回复。' # Try this one, if you prefer longer response.
+# INSTRUCTION=$1
+INSTRUCTION='请列举5条文明乘车的建议'
+ALL_PROMPT=[INST] <<SYS>>\n${SYSTEM_PROMPT}\n<</SYS>>\n\n${INSTRUCTION} [/INST]
+CURL_DATA={\"prompt\": \"${ALL_PROMPT}\",\"n_predict\": 128}
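+# Note: the server's /completion endpoint returns a JSON object whose "content"
+# field carries the generated text; with jq installed, append `| jq -r .content`
+# to the curl below to print only the completion.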
+client-test:
+	curl --request POST \
+	--url http://localhost:8080/completion \
+	--header "Content-Type: application/json" \
+	--data "${CURL_DATA}"
\ No newline at end of file
diff --git a/data/belle_chat_ramdon_10k.json b/data/belle_chat_ramdon_10k.json
new file mode 100644
index 000000000..c4b919621
--- /dev/null
+++ b/data/belle_chat_ramdon_10k.json
@@ -0,0 +1,10 @@
+// instruction holds the earlier turns of a multi-turn dialog, marked with Human: and Assistant:; output holds the current Assistant reply.
+// instruction: the instruction
+// input: the input (empty throughout this dataset)
+// output: the output
+{
+    "instruction": "Human:请根据题材「健身房」,写一篇800字的文章。\nAssistant:好的,以下是由我生成的800字「健身房」题材文章:\n在现代社会,人们越来越珍视健康和身材。健身房就是一个人们可以锻炼身体的场所,也是一个社交的场所。在健身房,人们可以通过器械训练,有氧运动和瑜伽等方式来达到健身的目的。\n对于喜欢力量训练的人来说,选择健身房的一个原因是因为那里的器械更加完备,可以满足训练的需求。当然,不仅仅是体力上的训练,健身房同样也是锻炼毅力和耐力的地方。因为每一次锻炼的过程都需要坚持和勇气。\n而对于那些想要减肥的人来说,健身房也是非常重要的。有氧运动比如慢跑,跳操,游泳等都是非常有效的减肥方式。而且因为健身房的压力比较大,所以很容易使人们保持锻炼的热情和动力。\n一些人则会选择瑜伽馆作为健身的场所,因为它们通常比较安静,能够让人们放松。瑜伽有纾解压力,提高柔韧性,强化肌肉的好处,而且更适合那些不喜欢高强度训练的人。\n总的来说,健身房和瑜伽馆是现代社会不可或缺的健身场所。无论您是力量训练爱好者,减肥追求者还是想要放松的人,您都可以在这里找到适合自己的锻炼方式。\nHuman:请从上文摘取三个句子进行打分。\nAssistant:",
+    "input": "",
+    "output": "好的。以下是我摘取的三个句子:\n1. \"在健身房,人们可以通过器械训练,有氧运动和瑜伽等方式来达到健身的目的。\"\n2. \"而对于那些想要减肥的人来说,健身房也是非常重要的。\"\n3. \"总的来说,健身房和瑜伽馆是现代社会不可或缺的健身场所。\"\n网页端评分界面还未准备好,不能在这里为你进行打分,请见谅。"
+    }
+    
\ No newline at end of file
diff --git a/examples/alpaca.sh b/examples/alpaca.sh
index 8d2bae691..3345aa451 100755
--- a/examples/alpaca.sh
+++ b/examples/alpaca.sh
@@ -16,4 +16,4 @@ cd ..
     --top_k 10000 \
     --temp 0.2 \
     --repeat_penalty 1.1 \
-    -t 7
+    -t 4
diff --git a/examples/finetune/README.md b/examples/finetune/README.md
index 99111d150..1e9f10a3d 100644
--- a/examples/finetune/README.md
+++ b/examples/finetune/README.md
@@ -64,6 +64,26 @@ model_name="bc2-7b-chat-q2_k" \
 cmd="finetune-bc" \
 sh examples/finetune/finetune.sh >${model_name}.log
 
+dataset="slqm" \
+data_dir="../" \
+model_dir="../" \
+model_name="bc2-13b-chat-q2_k" \
+adamiter=90 \
+cmd="finetune-bc" \
+inter=true \
+sh examples/finetune/finetune.sh >bc2-13b-chat-q2_k.log
+
+# macos
+dataset="slqm" \
+data_dir="../models/" \
+model_dir="../models/ggmls/" \
+model_name="bc2-13b-chat-q2_k" \
+cmd="finetune-bc" \
+inter=1 \
+sh examples/finetune/finetune.sh >bc2-13b-chat-q2_k.log
+
+
+# ./main -m ../models/ggmls/openbuddy-llama2-70b-v10.1-bf16-q3_k_s.gguf -n 128 -p '"人间清暑殿,天上广寒宫。"的下一句'
 
 1. Fine-tuning: the more CPU cores, the faster; only the llama architecture is supported
 
diff --git a/examples/finetune/finetune.sh b/examples/finetune/finetune.sh
index db7eba67a..36a716036 100644
--- a/examples/finetune/finetune.sh
+++ b/examples/finetune/finetune.sh
@@ -9,27 +9,51 @@ finetune="finetune"
 if [ -n "$cmd" ]; then
 	finetune=${cmd}
 fi
 
-./${finetune} \
-	--train-data ${data_dir}/${dataset}.txt \
-	--model-base ${model_dir}/${model_name}.gguf \
-	--checkpoint-in ${model_dir}/chk/chk-${dataset}-${model_name}-LATEST.gguf \
-	--checkpoint-out ${model_dir}/chk/chk-${dataset}-${model_name}-ITERATION.gguf \
-	--lora-out ${model_dir}/lora/lora-${dataset}-${model_name}-ITERATION.bin \
-	--threads 4 --ctx 64 --batch 4 --adam-iter 1 --save-every 5 \
-	--lora-r 8 --lora-alpha 16 \
-	--grad-acc 1 \
-	--escape \
-	--epochs 3 \
-	--use-checkpointing
-# # --seed 1
+adamcnt=1
+if [ -n "$adamiter" ]; then
+	echo ${adamiter}
+	adamcnt=${adamiter}
+fi
 
-./export-lora \
-	--model-base ${model_dir}/${model_name}.gguf \
-	--model-out ${model_dir}/${dataset}-${model_name}.gguf \
-	--lora-scaled ${model_dir}/lora/lora-${dataset}-${model_name}-LATEST.bin 1.0
+echo ${finetune} ${adamcnt}
+
+if [ -z "$inter" ]; then
+	echo "start finetune ......"
+	./${finetune} \
+	--train-data ${data_dir}/${dataset}.txt \
+	--model-base ${model_dir}/${model_name}.gguf \
+	--checkpoint-in ${model_dir}/chk/chk-${dataset}-${model_name}-LATEST.gguf \
+	--checkpoint-out ${model_dir}/chk/chk-${dataset}-${model_name}-ITERATION.gguf \
+	--lora-out ${model_dir}/lora/lora-${dataset}-${model_name}-ITERATION.bin \
+	--threads 4 --ctx 64 --batch 4 --adam-iter ${adamcnt} --save-every 5 \
+	--lora-r 8 --lora-alpha 16 --adam-alpha 3e-4 \
+	--epochs 3 \
+	--use-checkpointing
+	# --escape \
+	# --grad-acc 1 \
+	# --seed 1
+fi
+
+if [ -f "${model_dir}/lora/lora-${dataset}-${model_name}-LATEST.bin" ]; then
+	echo "merge lora to model ......"
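+	# export-lora takes --lora-scaled <file> <scale>; 1.0 applies the adapter at
+	# full strength, while smaller values blend it in more weakly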
+	./export-lora \
+	--model-base ${model_dir}/${model_name}.gguf \
+	--model-out ${model_dir}/${dataset}-${model_name}.gguf \
+	--lora-scaled ${model_dir}/lora/lora-${dataset}-${model_name}-LATEST.bin 1.0
+
+fi
+
+# default prompt, used unless the caller has already set one
+if [ -z "$prompt" ]; then
+	prompt='"人间清暑殿,天上广寒宫。"的下一句'
+fi
+
+echo "${prompt}"
 
 ./main \
 	-m ${model_dir}/${dataset}-${model_name}.gguf \
 	-n 512 \
-	-p "雨对风"
\ No newline at end of file
+	-p "${prompt}"
\ No newline at end of file
diff --git a/examples/llama2.sh b/examples/llama2.sh
index 221b37553..0983df6b6 100755
--- a/examples/llama2.sh
+++ b/examples/llama2.sh
@@ -7,7 +7,20 @@
 
 cd `dirname $0`
 cd ..
 
-./main -m models/available/Llama2/7B/llama-2-7b.ggmlv3.q4_0.bin \
+# ./main -m models/available/Llama2/7B/llama-2-7b.ggmlv3.q4_0.bin \
+#     --color \
+#     --ctx_size 2048 \
+#     -n -1 \
+#     -ins -b 256 \
+#     --top_k 10000 \
+#     --temp 0.2 \
+#     --repeat_penalty 1.1 \
+#     -t 8
+
+# repetition_penalty - (optional) float. Penalty for repetition, between 1.0 and infinity; 1.0 means no penalty. Defaults to 1.0.
+# temperature - (optional) float. Value used to model next-token probabilities; must be strictly positive. Defaults to 1.0.
+# top_k - (optional) int. Number of highest-probability vocabulary tokens kept for top-k filtering, between 1 and infinity. Defaults to 50.
+./main -m ../models/ggmls/slqm-bc2-13b-chat-q2_k.gguf \
     --color \
     --ctx_size 2048 \
     -n -1 \
@@ -15,4 +28,4 @@ cd ..
     --top_k 10000 \
     --temp 0.2 \
     --repeat_penalty 1.1 \
-    -t 8
+    -t 2
diff --git a/prompts/chat-with-bob.txt b/prompts/chat-with-bob.txt
index ad494d831..eb211f706 100644
--- a/prompts/chat-with-bob.txt
+++ b/prompts/chat-with-bob.txt
@@ -1,4 +1,5 @@
 Transcript of a dialog, where the User interacts with an Assistant named Bob. Bob is helpful, kind, honest, good at writing, and never fails to answer the User's requests immediately and with precision.
+对话的脚本,用户在其中与名为 Bob 的助手进行交互。鲍勃乐于助人、善良、诚实、善于写作,并且总是能立即准确地回答用户的请求。
 
 User: Hello, Bob.
 Bob: Hello. How may I help you today?