llama : remove memory_f16 and kv_f16 flags

This commit is contained in:
Georgi Gerganov 2023-12-05 18:18:16 +02:00
parent 4adb1d69d9
commit af99c6fbfc
No known key found for this signature in database
GPG key ID: 449E073F9DC10735
6 changed files with 0 additions and 16 deletions

View file

@@ -321,7 +321,6 @@ int main(int argc, char ** argv) {
auto cparams = llama_context_default_params();
cparams.n_ctx = 256;
cparams.seed = 1;
cparams.f16_kv = false;
ctx = llama_new_context_with_model(model, cparams);

View file

@@ -2108,10 +2108,6 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
}
params.yarn_beta_slow = std::stof(argv[i]);
}
else if (arg == "--memory-f32" || arg == "--memory_f32")
{
params.memory_f16 = false;
}
else if (arg == "--threads" || arg == "-t")
{
if (++i >= argc)