Fix code formatting in README.md

This is about tuning the code formatting of the README file
This commit is contained in:
Romain Neutron 2024-01-30 00:02:03 +01:00 committed by GitHub
parent ceebbb5b21
commit a4c777250e
No known key found for this signature in database
GPG key ID: B5690EEEBB952194

View file

@@ -614,9 +614,9 @@ Building the program with BLAS support may lead to some performance improvements
# obtain the original LLaMA model weights and place them in ./models
ls ./models
65B 30B 13B 7B tokenizer_checklist.chk tokenizer.model
# [Optional] for models using BPE tokenizers
ls ./models
65B 30B 13B 7B vocab.json
# install Python dependencies
python3 -m pip install -r requirements.txt
@@ -624,8 +624,8 @@ python3 -m pip install -r requirements.txt
# convert the 7B model to ggml FP16 format
python3 convert.py models/7B/
# [Optional] for models using BPE tokenizers
python convert.py models/7B/ --vocabtype bpe
# quantize the model to 4-bits (using q4_0 method)
./quantize ./models/7B/ggml-model-f16.gguf ./models/7B/ggml-model-q4_0.gguf q4_0