improve handling of not yet supported tensor types
This commit is contained in:
parent
ad64e33aad
commit
166065837e
1 changed file with 4 additions and 2 deletions
|
@@ -644,9 +644,11 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
     auto add_to_f32 = [] (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
         if (ggml_is_quantized(a->type)) {
             return ggml_add_cast(ctx, a, b, GGML_TYPE_F32);
-        } else {
-            GGML_ASSERT(a->type == GGML_TYPE_F32);
+        } else if (a->type == GGML_TYPE_F32) {
             return ggml_add(ctx, a, b);
+        } else {
+            die_fmt("%s: Finetuning on tensors with type '%s' is not yet supported.\n",
+                __func__, ggml_type_name(a->type));
         }
     };

Loading…
Add table
Add a link
Reference in a new issue