llama/ggml: add LLM training support
- more compact progress bar
- refactor: llama_prepare_sbatch/ubatch
- llama_save_model_to_file
- gqa_mode arg for repeat_back
- llama_opt_param_filter
- ggml_graph_dup force_grads
- refactor ggml_opt, fix test-opt
Parent: a5203b4465
Commit: c25557362a
26 changed files with 1294 additions and 339 deletions
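The commit message above names the new training surface without showing how the pieces fit together. Below is a minimal sketch of a finetuning flow; the llama_opt_init/llama_opt_epoch entry points, the llama_opt_params struct, and its field names are assumptions inferred from the identifiers listed in the message (llama_opt_param_filter, llama_save_model_to_file), not verbatim API.

// Hedged sketch only: llama_opt_init/llama_opt_epoch and the llama_opt_params
// fields below are assumptions, not confirmed signatures from this commit.
#include <vector>
#include "llama.h"
#include "common.h"

static void finetune(struct llama_model * model, struct llama_context * ctx,
                     const std::vector<llama_token> & tokens) {
    // common_opt_dataset_init (declared in the diff below) turns a flat token
    // stream into (input, label) windows for next-token prediction.
    ggml_opt_dataset_t dataset = common_opt_dataset_init(ctx, tokens, /*stride =*/ llama_n_ctx(ctx));

    struct llama_opt_params opt_params = {};                  // assumed struct name
    opt_params.param_filter    = llama_opt_param_filter_all;  // assumed default: train all tensors
    opt_params.param_filter_ud = nullptr;

    llama_opt_init(ctx, model, opt_params);                   // assumed entry point

    for (int epoch = 0; epoch < 2; ++epoch) {
        // assumed per-epoch driver; result objects and progress callbacks omitted
        llama_opt_epoch(ctx, dataset, /*result_train =*/ nullptr, /*result_eval =*/ nullptr,
                        /*idata_split =*/ 0, /*callback_train =*/ nullptr, /*callback_eval =*/ nullptr);
    }

    // llama_save_model_to_file (named in the commit message) persists the tuned weights.
    llama_save_model_to_file(model, "model-finetuned.gguf");

    ggml_opt_dataset_free(dataset);
}

A parameter filter is the natural place to freeze subsets of the model, e.g. training only the output head while leaving attention weights untouched, which is presumably why llama_opt_param_filter takes user data rather than being a fixed policy.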
@@ -697,3 +697,9 @@ const char * const LLM_KV_SPLIT_COUNT = "split.count";
 const char * const LLM_KV_SPLIT_TENSORS_COUNT = "split.tensors.count";
 }
 
+//
+// training utils
+//
+
+ggml_opt_dataset_t common_opt_dataset_init(struct llama_context * ctx, const std::vector<llama_token> & tokens, int64_t stride);
+
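The declaration leaves the stride parameter undocumented. Under the natural next-token-prediction reading (an assumption, not stated in the diff), datapoint i would cover tokens [i*stride, i*stride + n_ctx) with labels shifted one token to the right, so stride controls the overlap between consecutive training windows. A small usage sketch:

#include <vector>
#include "common.h"

// Assumed semantics: each datapoint spans llama_n_ctx(ctx) tokens and its
// label is the same window shifted by one token; windows start every `stride`
// tokens, so stride == n_ctx/2 gives 50% overlap and stride == n_ctx gives
// disjoint windows.
ggml_opt_dataset_t build_dataset(struct llama_context * ctx,
                                 const std::vector<llama_token> & tokens) {
    const int64_t stride = llama_n_ctx(ctx) / 2; // caller's trade-off, not mandated by the API
    return common_opt_dataset_init(ctx, tokens, stride);
}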