squash! clip : suppress unused variable warnings
Remove e (/*e*/) instead of using GGML_UNUSED.
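Both styles silence unused-variable warnings for an exception that is caught but never inspected: either bind the exception and explicitly mark it as used, or comment out the name so nothing unused is bound in the first place. A minimal sketch of the two styles follows; EXAMPLE_UNUSED, parse_with_macro and parse_with_comment are illustrative names, not identifiers from this repository, and the macro only stands in for GGML_UNUSED's role without asserting its exact definition.

    #include <cstdio>
    #include <stdexcept>
    #include <string>

    // Stand-in "mark as used" macro; GGML_UNUSED plays this role in ggml,
    // but its exact definition is not shown here, so this is an assumption.
    #define EXAMPLE_UNUSED(x) (void)(x)

    // Style removed by this commit: bind the exception, then silence the warning.
    static int parse_with_macro(const std::string & s) {
        try {
            return std::stoi(s);
        } catch (const std::exception & e) {
            EXAMPLE_UNUSED(e); // tell the compiler the binding is intentionally unused
            return -1;
        }
    }

    // Style adopted by this commit: comment out the name so nothing is bound at all.
    static int parse_with_comment(const std::string & s) {
        try {
            return std::stoi(s);
        } catch (const std::exception & /*e*/) { // no variable, no warning
            return -1;
        }
    }

    int main() {
        std::printf("%d %d\n", parse_with_macro("42"), parse_with_comment("oops"));
        return 0;
    }

The commented-out name keeps each catch body one statement shorter, which is why this squash drops the GGML_UNUSED calls below.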
parent c32bad7e65
commit bc9c9a8a82
1 changed file with 13 additions and 21 deletions
@@ -1121,24 +1121,21 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
             }
             if (n < 32)
                 hparams.image_grid_pinpoints[n] = 0;
-        } catch (std::runtime_error & e) {
+        } catch (std::runtime_error & /*e*/) {
             hparams.image_grid_pinpoints[0]=0;
-            GGML_UNUSED(e);
         }

         try {
             int idx = get_key_idx(ctx, KEY_MM_PATCH_MERGE_TYPE);
             strcpy(hparams.mm_patch_merge_type, gguf_get_val_str(ctx, idx));
-        } catch (std::runtime_error & e) {
+        } catch (std::runtime_error & /*e*/) {
             strcpy(hparams.mm_patch_merge_type, "flat");
-            GGML_UNUSED(e);
         }

         try {
             hparams.image_crop_resolution = get_u32(ctx, KEY_IMAGE_CROP_RESOLUTION); // llava-1.6
-        } catch(const std::exception& e) {
+        } catch(const std::exception& /*e*/) {
             hparams.image_crop_resolution = hparams.image_size;
-            GGML_UNUSED(e);
         }

         int idx_mean = get_key_idx(ctx, KEY_IMAGE_MEAN);
@@ -1176,43 +1173,38 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
         try {
             vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
             new_clip->has_class_embedding = true;
-        } catch (const std::exception& e) {
+        } catch (const std::exception& /*e*/) {
             new_clip->has_class_embedding = false;
-            GGML_UNUSED(e);
         }

         try {
             vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
             vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
             new_clip->has_pre_norm = true;
-        } catch (std::exception & e) {
+        } catch (std::exception & /*e*/) {
             new_clip->has_pre_norm = false;
-            GGML_UNUSED(e);
         }

         try {
             vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
             vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
             new_clip->has_post_norm = true;
-        } catch (std::exception & e) {
+        } catch (std::exception & /*e*/) {
             new_clip->has_post_norm = false;
-            GGML_UNUSED(e);
         }

         try {
             vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
             new_clip->has_patch_bias = true;
-        } catch (std::exception & e) {
+        } catch (std::exception & /*e*/) {
             new_clip->has_patch_bias = false;
-            GGML_UNUSED(e);
         }

         try {
             vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
             vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
-        } catch(const std::exception& e) {
+        } catch(const std::exception& /*e*/) {
             LOG_TEE("%s: failed to load vision model tensors\n", __func__);
-            GGML_UNUSED(e);
         }

         // LLaVA projection
@@ -1223,26 +1215,26 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
                 // Yi-type llava
                 vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "weight"));
                 vision_model.mm_1_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "bias"));
-            } catch (std::runtime_error & e) { GGML_UNUSED(e); }
+            } catch (std::runtime_error & /*e*/) { }
             try {
                 // missing in Yi-type llava
                 vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight"));
                 vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias"));
-            } catch (std::runtime_error & e) { GGML_UNUSED(e); }
+            } catch (std::runtime_error & /*e*/) { }
             try {
                 // Yi-type llava
                 vision_model.mm_3_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "weight"));
                 vision_model.mm_3_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "bias"));
-            } catch (std::runtime_error & e) { GGML_UNUSED(e); }
+            } catch (std::runtime_error & /*e*/) { }
             try {
                 // Yi-type llava
                 vision_model.mm_4_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "weight"));
                 vision_model.mm_4_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "bias"));
-            } catch (std::runtime_error & e) { GGML_UNUSED(e); }
+            } catch (std::runtime_error & /*e*/) { }
             try {
                 vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE);
                 // LOG_TEE("%s: image_newline tensor (llava-1.6) found\n", __func__);
-            } catch (std::runtime_error & e) { GGML_UNUSED(e); }
+            } catch (std::runtime_error & /*e*/) { }
         } else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
             // MobileVLM projection
             vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));