llama2.c: update default path for vocab model + readme
parent c88362d194 · commit df3b81ab29

2 changed files with 9 additions and 9 deletions
examples/convert-llama2c-to-ggml/README.md:

````diff
@@ -12,14 +12,14 @@ usage: ./convert-llama2c-to-ggml [options]
 
 options:
   -h, --help                       show this help message and exit
-  --copy-vocab-from-model FNAME    model path from which to copy vocab (default 'tokenizer.bin')
+  --copy-vocab-from-model FNAME    path of gguf llama model or llama2.c vocabulary from which to copy vocab (default 'models/7B/ggml-model-f16.gguf')
   --llama2c-model FNAME            [REQUIRED] model path from which to load Karpathy's llama2.c model
   --llama2c-output-model FNAME     model path to save the converted llama2.c model (default 'ak_llama_model.bin')
 ```
 
 An example command using a model from [karpathy/tinyllamas](https://huggingface.co/karpathy/tinyllamas) is as follows:
 
-`$ ./convert-llama2c-to-ggml --copy-vocab-from-model ../llama2.c/tokenizer.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.gguf.bin`
+`$ ./convert-llama2c-to-ggml --copy-vocab-from-model llama-2-7b-chat.gguf.q2_K.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.gguf.bin`
 
 Now you can use the model with a command like:
````
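The README's follow-up example is truncated in this view. A plausible invocation of the converted model, assuming llama.cpp's standard `main` flags (`-m` model path, `-p` prompt, `-n` number of tokens to generate); the prompt is illustrative only:

`$ ./main -m stories42M.gguf.bin -p "Once upon a time" -n 256`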
examples/convert-llama2c-to-ggml/convert-llama2c-to-ggml.cpp:

```diff
@@ -550,11 +550,11 @@ void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab)
         const int token_idx = gguf_find_key(ctx, "tokenizer.ggml.tokens");
         GGML_ASSERT(token_idx >= 0);
 
         const int score_idx = gguf_find_key(ctx, "tokenizer.ggml.scores");
         GGML_ASSERT(score_idx >= 0);
         const float * scores = (const float * ) gguf_get_arr_data(ctx, score_idx);
 
         const int toktype_idx = gguf_find_key(ctx, "tokenizer.ggml.token_type");
         GGML_ASSERT(toktype_idx >= 0);
         const int * toktypes = (const int * ) gguf_get_arr_data(ctx, toktype_idx);
```
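For readers unfamiliar with the gguf key/array API used in this hunk, here is a minimal, self-contained sketch of the same read pattern. It is an illustration only, assuming the `gguf_*` functions from `ggml.h` (`gguf_init_from_file`, `gguf_get_arr_n`, `gguf_get_arr_str`); `print_gguf_vocab` is a hypothetical helper, not part of this commit:

```cpp
#include "ggml.h"
#include <cstdio>

// Hypothetical helper illustrating the gguf vocab-read pattern above.
static void print_gguf_vocab(const char * fname) {
    struct ggml_context * ctx_data = NULL;
    struct gguf_init_params params = { /*.no_alloc =*/ false, /*.ctx =*/ &ctx_data };
    struct gguf_context * ctx = gguf_init_from_file(fname, params);
    GGML_ASSERT(ctx != NULL);

    const int token_idx = gguf_find_key(ctx, "tokenizer.ggml.tokens");
    const int score_idx = gguf_find_key(ctx, "tokenizer.ggml.scores");
    GGML_ASSERT(token_idx >= 0 && score_idx >= 0);

    // scores come back as a flat f32 array; token strings are read per element
    const float * scores = (const float *) gguf_get_arr_data(ctx, score_idx);
    const int n_vocab = gguf_get_arr_n(ctx, token_idx);
    for (int i = 0; i < n_vocab; i++) {
        printf("%6d %8.3f %s\n", i, scores[i], gguf_get_arr_str(ctx, token_idx, i));
    }

    gguf_free(ctx);
    ggml_free(ctx_data);
}
```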
```diff
@@ -586,7 +586,7 @@ void load_vocab(const char *filename, Config *config, struct llama_vocab *vocab)
             float_t score = file.read_f32();
             uint32_t len = file.read_u32();
             std::string text = file.read_string(len);
 
             unsigned char byte_val;
             llama_vocab::ttype type = LLAMA_TOKEN_TYPE_NORMAL;
             if (id == UNKNOWN_TOKEN_ID) {
```
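This branch handles the plain llama2.c `tokenizer.bin` vocabulary. Below is a minimal sketch of that file layout, assuming the format karpathy's llama2.c writes (an i32 `max_token_length` header, then per token: f32 score, i32 byte length, raw bytes); `read_llama2c_vocab` is a hypothetical stand-in for the `llama_file` reads above:

```cpp
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct tok_entry { float score; std::string text; };

// Hypothetical reader for llama2.c's tokenizer.bin (assumed layout, see lead-in).
static std::vector<tok_entry> read_llama2c_vocab(const char * fname, int n_vocab) {
    std::vector<tok_entry> vocab(n_vocab);
    FILE * f = fopen(fname, "rb");
    if (!f) return {};

    int32_t max_token_length = 0;
    fread(&max_token_length, sizeof(max_token_length), 1, f); // header

    for (int i = 0; i < n_vocab; i++) {
        int32_t len = 0;
        fread(&vocab[i].score, sizeof(float), 1, f); // f32 score
        fread(&len, sizeof(len), 1, f);              // i32 byte length
        vocab[i].text.resize(len);
        fread(&vocab[i].text[0], 1, len, f);         // raw token bytes
    }

    fclose(f);
    return vocab;
}
```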
```diff
@@ -699,7 +699,7 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
     gguf_set_arr_data(ctx, "tokenizer.ggml.token_type", GGUF_TYPE_INT32, token_types.data(), token_types.size());
 
     gguf_set_val_str(ctx, "tokenizer.ggml.model", "llama");
 
     gguf_set_val_str(ctx, "general.name", "llama2.c");
     gguf_set_val_str(ctx, "general.architecture", "llama");
```
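This is the mirror of the read path: metadata is staged into a `gguf_context` and flushed once at the end. A minimal sketch, assuming the `gguf_init_empty`/`gguf_set_val_str`/`gguf_write_to_file` API from `ggml.h`; `write_minimal_gguf` and its output name are hypothetical:

```cpp
#include "ggml.h"

// Hypothetical illustration of the gguf write pattern above (metadata only).
static void write_minimal_gguf(const char * fname) {
    struct gguf_context * ctx = gguf_init_empty();

    gguf_set_val_str(ctx, "general.name",         "llama2.c");
    gguf_set_val_str(ctx, "general.architecture", "llama");
    gguf_set_val_str(ctx, "tokenizer.ggml.model", "llama");

    // in save_as_llama_model, tensors registered via gguf_add_tensor(ctx, t)
    // are written alongside this metadata; only_meta=true skips tensor data
    gguf_write_to_file(ctx, fname, /*only_meta =*/ true);

    gguf_free(ctx);
}
```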
```diff
@@ -755,7 +755,7 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
         ggml_format_name(layer.w3, TN_FFN_UP, i);
         gguf_add_tensor(ctx, layer.w3);
 
         ggml_format_name(layer.ffn_norm, TN_FFN_NORM, i);
         gguf_add_tensor(ctx, layer.ffn_norm);
     }
```
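`ggml_format_name` applies a printf-style pattern to a tensor's name before `gguf_add_tensor` records it under that name. The `TN_*` patterns are defined earlier in the file; the values below are a hypothetical reconstruction following the gguf llama naming scheme:

```cpp
// Hypothetical reconstruction; the real defines live earlier in the file.
#define TN_FFN_NORM "blk.%d.ffn_norm.weight"
#define TN_FFN_UP   "blk.%d.ffn_up.weight"

// e.g. ggml_format_name(layer.w3, TN_FFN_UP, 7) names the tensor
// "blk.7.ffn_up.weight", the key it is stored under in the gguf file.
```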
```diff
@@ -766,7 +766,7 @@ void save_as_llama_model(struct llama_vocab * vocab, struct my_llama_model * mod
 struct train_params get_default_train_params() {
     struct train_params params;
-    params.fn_vocab_model = "tokenizer.bin";
+    params.fn_vocab_model = "models/7B/ggml-model-f16.gguf";
     params.fn_llama2c_output_model = "ak_llama_model.bin";
     params.fn_train_data = "shakespeare.txt";
     params.fn_checkpoint_in = "checkpoint.bin";
```
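This hunk is the core of the commit: the default vocab source becomes a gguf model rather than llama2.c's `tokenizer.bin`. The default only applies when `--copy-vocab-from-model` is not given; a minimal sketch of that override pattern (hypothetical, not the file's actual argument parser):

```cpp
#include <cstring>

// Hypothetical sketch of how the CLI flag overrides the default path set in
// get_default_train_params(); the real parser lives in the same file.
static void apply_vocab_flag(int argc, char ** argv, const char ** fn_vocab_model) {
    for (int i = 1; i + 1 < argc; i++) {
        if (strcmp(argv[i], "--copy-vocab-from-model") == 0) {
            *fn_vocab_model = argv[++i]; // otherwise 'models/7B/ggml-model-f16.gguf' is used
        }
    }
}
```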
```diff
@@ -819,7 +819,7 @@ void print_usage(int /*argc*/, char ** argv, const struct train_params * params)
     fprintf(stderr, "\n");
     fprintf(stderr, "options:\n");
     fprintf(stderr, "  -h, --help                       show this help message and exit\n");
-    fprintf(stderr, "  --copy-vocab-from-model FNAME    llama2.c vocabulary or ggmlv3 model path from which to copy vocab (default '%s')\n", params->fn_vocab_model);
+    fprintf(stderr, "  --copy-vocab-from-model FNAME    path of gguf llama model or llama2.c vocabulary from which to copy vocab (default '%s')\n", params->fn_vocab_model);
     fprintf(stderr, "  --llama2c-model FNAME            [REQUIRED] model path from which to load Karpathy's llama2.c model\n");
     fprintf(stderr, "  --llama2c-output-model FNAME     model path to save the converted llama2.c model (default '%s')\n", params->fn_llama2c_output_model);
     fprintf(stderr, "\n");
```