llama : remove memory_f16 and kv_f16 flags

commit: af99c6fbfc
parent: 4adb1d69d9

6 changed files with 0 additions and 16 deletions
@@ -321,7 +321,6 @@ int main(int argc, char ** argv) {
     auto cparams = llama_context_default_params();

     cparams.n_ctx = 256;
     cparams.seed = 1;
-    cparams.f16_kv = false;

     ctx = llama_new_context_with_model(model, cparams);
|
Loading…
Add table
Add a link
Reference in a new issue