From 8dbd0880c44b8e60ea4a5f9cb288c0f9d0fbfc68 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Mon, 6 Jan 2025 12:28:25 +0100
Subject: [PATCH] llama : remove check flash_attn with lora

---
 src/llama.cpp | 6 ------
 1 file changed, 6 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 60728e5bb..8d8565fbf 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
-
     ctx->lora_adapters[adapter] = scale;
-
     return 0;
 }
 
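
For context, below is a minimal usage sketch (not part of the patch) of what this change
enables: calling llama_lora_adapter_set on a context created with flash_attn enabled,
which previously failed with -1. It assumes the public llama.h API of this period; the
model and adapter file paths are placeholders.

// Minimal sketch, not part of the patch. Assumes the llama.h API of this
// era; "model.gguf" and "adapter.gguf" are placeholder paths.
#include "llama.h"

#include <cstdio>

int main() {
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == NULL) {
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn = true; // before this patch, this made the set call below fail
    llama_context * ctx = llama_new_context_with_model(model, cparams);

    llama_lora_adapter * adapter = llama_lora_adapter_init(model, "adapter.gguf");

    // With the check removed, this succeeds even though flash_attn is enabled.
    if (llama_lora_adapter_set(ctx, adapter, 1.0f) != 0) {
        fprintf(stderr, "failed to set LoRA adapter\n");
    }

    llama_free(ctx);
    llama_free_model(model); // also frees adapters attached to the model
    return 0;
}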