add mistral 7b v0.1 q8 lora in gguf format, so we can test perplexity
This commit is contained in:
parent
da41206bd6
commit
2b137c54bd
1 changed files with 5 additions and 4 deletions
|
@ -640,11 +640,12 @@ function gg_run_mistral_7b_v0_1 {
|
||||||
lora_shakespeare="${path_lora}/ggml-adapter-model.bin"
|
lora_shakespeare="${path_lora}/ggml-adapter-model.bin"
|
||||||
|
|
||||||
# TODO - we need mistral lora
|
# TODO - we need mistral lora
|
||||||
gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_7b_v2_shakespeare_lora/resolve/main/adapter_config.json
|
|
||||||
gg_wget ${path_lora} https://huggingface.co/slaren/open_llama_7b_v2_shakespeare_lora/resolve/main/adapter_model.bin
|
|
||||||
gg_wget ${path_shakespeare} https://huggingface.co/slaren/open_llama_7b_v2_shakespeare_lora/resolve/main/shakespeare.txt
|
|
||||||
|
|
||||||
python3 ../convert-lora-to-ggml.py ${path_lora}
|
gg_wget ${path_lora} https://huggingface.co/datasets/segmond/mistral_7b_v0_1_q8_0_shakespeare_lora/resolve/main/adapter_model.bin
|
||||||
|
gg_wget ${path_shakespeare} https://huggingface.co/datasets/segmond/mistral_7b_v0_1_q8_0_shakespeare_lora/resolve/main/shakespeare.txt
|
||||||
|
|
||||||
|
# model is in gguf format since we finetuned from gguf
|
||||||
|
#python3 ../convert-lora-to-ggml.py ${path_lora}
|
||||||
|
|
||||||
# f16
|
# f16
|
||||||
(time ./bin/perplexity --model ${model_f16} -f ${shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-f16.log
|
(time ./bin/perplexity --model ${model_f16} -f ${shakespeare} -t 1 -ngl 999 -c 2048 -b 512 --chunks 3 ) 2>&1 | tee -a $OUT/${ci}-ppl-shakespeare-f16.log
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue