llama : remove check flash_attn with lora

Xuan Son Nguyen 2025-01-06 12:28:25 +01:00
parent 6369f867a4
commit 8dbd0880c4


@@ -11519,13 +11519,7 @@ int32_t llama_lora_adapter_set(
             struct llama_context * ctx,
             struct llama_lora_adapter * adapter,
             float scale) {
-    if (ctx->cparams.flash_attn) {
-        LLAMA_LOG_ERROR("%s: flash_attn is not compatible with LoRA\n", __func__);
-        return -1;
-    }
     ctx->lora_adapters[adapter] = scale;
     return 0;
 }
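
With the check removed, llama_lora_adapter_set no longer returns -1 when the context was created with flash_attn enabled; it simply records the adapter and its scale in ctx->lora_adapters. Below is a minimal usage sketch, not part of the commit, assuming the C API names of this period (llama_load_model_from_file, llama_new_context_with_model, llama_lora_adapter_init) and placeholder file paths:

#include <stdio.h>
#include "llama.h"

int main(void) {
    // Load a base model (path is a placeholder).
    struct llama_model_params mparams = llama_model_default_params();
    struct llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == NULL) {
        return 1;
    }

    // Create a context with flash attention enabled.
    struct llama_context_params cparams = llama_context_default_params();
    cparams.flash_attn = true; // before this commit, this made llama_lora_adapter_set fail
    struct llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        llama_free_model(model);
        return 1;
    }

    // Load a LoRA adapter (path is a placeholder) and attach it to the context.
    struct llama_lora_adapter * adapter = llama_lora_adapter_init(model, "adapter.gguf");
    if (adapter != NULL) {
        // After this commit the call succeeds even though flash_attn is on.
        int32_t res = llama_lora_adapter_set(ctx, adapter, 1.0f);
        printf("llama_lora_adapter_set returned %d\n", res);
    }

    llama_free(ctx);
    llama_free_model(model);
    return 0;
}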