fix warnings

xaedes 2023-08-30 16:55:49 +02:00
parent 4e986ac4bc
commit 0c57f9f0b3
3 changed files with 9 additions and 19 deletions


@@ -724,7 +724,7 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
return std::make_tuple(nullptr, nullptr);
}
- for (int i = 0; i < params.lora_adapter.size(); ++i) {
+ for (unsigned int i = 0; i < params.lora_adapter.size(); ++i) {
const std::string& lora_adapter = std::get<0>(params.lora_adapter[i]);
float lora_scale = std::get<1>(params.lora_adapter[i]);
int err = llama_model_apply_lora_from_file(model,
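The hunk above fixes a signed/unsigned comparison warning: std::vector::size() returns an unsigned type, so testing it against an int loop index trips -Wsign-compare, and switching the index to unsigned int silences it. A later hunk (the GGML_ASSERT one) takes the other route and casts the signed side instead. Below is a minimal self-contained sketch of the same pattern in C, with invented names, where size_t plays the role of the unsigned count:

#include <stddef.h>
#include <stdio.h>

static void print_all(const char * const * items, size_t count) {
    /* old pattern: 'for (int i = 0; i < count; ++i)' compares int against
       size_t and triggers -Wsign-compare; an unsigned index (or a cast on
       one side) makes both operands the same signedness. */
    for (size_t i = 0; i < count; ++i) {
        printf("%s\n", items[i]);
    }
}

int main(void) {
    const char * items[] = { "first", "second", "third" };
    print_all(items, sizeof(items) / sizeof(items[0]));
    return 0;
}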


@@ -406,18 +406,13 @@ void init_model(struct llama_model * input, struct my_llama_model * model, uint3
hparams.n_layer = llama_model_n_layer(input);
hparams.n_rot = llama_model_n_rot(input);
- const uint32_t n_embd = hparams.n_embd;
- const uint32_t n_layer = hparams.n_layer;
- const uint32_t n_vocab = hparams.n_vocab;
- const uint32_t n_ff = hparams.n_ff;
model->tok_embeddings = llama_get_model_tensor(input, tn(LLM_TENSOR_TOKEN_EMBD));
model->norm = llama_get_model_tensor(input, tn(LLM_TENSOR_OUTPUT_NORM));
model->output = llama_get_model_tensor(input, tn(LLM_TENSOR_OUTPUT));
- model->layers.resize(n_layer);
+ model->layers.resize(hparams.n_layer);
- for (uint32_t i = 0; i < n_layer; ++i) {
+ for (uint32_t i = 0; i < hparams.n_layer; ++i) {
auto & layer = model->layers[i];
layer.attention_norm = llama_get_model_tensor(input, tni(LLM_TENSOR_ATTN_NORM, i));
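This hunk appears to remove locals that are initialized but never read (the kind of thing -Wunused-variable or -Wunused-but-set-variable reports) and reads hparams.n_layer directly where it is needed. A small sketch of the same cleanup, with invented names and assuming the unused-variable warning is what was being fixed:

#include <stdio.h>

struct toy_hparams {
    unsigned int n_layer;
    unsigned int n_embd;
};

static void init_layers(const struct toy_hparams * hp) {
    /* old pattern: 'const unsigned int n_embd = hp->n_embd;' with no later
       use draws -Wunused-variable; reading the field at the point of use
       avoids the dead local. */
    for (unsigned int i = 0; i < hp->n_layer; ++i) {
        printf("init layer %u, width %u\n", i, hp->n_embd);
    }
}

int main(void) {
    struct toy_hparams hp = { 4, 32 };
    init_layers(&hp);
    return 0;
}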
@@ -654,7 +649,7 @@ struct ggml_tensor * llama_build_lora_finetune_graphs(
const float rope_freq_base = lora->hparams.rope_freq_base;
const float rope_freq_scale = lora->hparams.rope_freq_scale;
- GGML_ASSERT(n_layer == lora->layers.size());
+ GGML_ASSERT((size_t) n_layer == lora->layers.size());
auto set_name = [](struct ggml_tensor * t, const char * n) {
ggml_set_name(t, n);
@@ -828,15 +823,12 @@ struct ggml_tensor * llama_build_lora_finetune_graphs(
// allocating checkpoints in one block to reduce memory fragmentation
// note: they will be freed in reverse order
- for (int i = 0; i < checkpoints.size(); ++i) {
+ for (unsigned int i = 0; i < checkpoints.size(); ++i) {
if (checkpoints[i]->data == NULL && checkpoints[i]->view_src == NULL) {
ggml_allocr_alloc(alloc, checkpoints[i]);
}
}
- int n_leafs_after = gb->n_leafs;
- int n_nodes_after = gb->n_nodes;
ggml_allocr_alloc_graph(alloc, gb);
// remove the additional nodes and leafs

ggml.c (10 changed lines)

@@ -4851,7 +4851,6 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
}
void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
- const int64_t ne3 = tensor->ne[3];
const int64_t ne2 = tensor->ne[2];
const int64_t ne1 = tensor->ne[1];
const int64_t ne0 = tensor->ne[0];
@@ -16214,16 +16213,16 @@ struct hash_map {
void * vals[GGML_GRAPH_HASHTABLE_SIZE];
};
- struct hash_map * new_hash_map() {
+ static struct hash_map * new_hash_map(void) {
struct hash_map * result = malloc(sizeof(struct hash_map));
for (int i=0; i<GGML_GRAPH_HASHTABLE_SIZE; ++i) {
result->keys[i] = NULL;
result->vals[i] = NULL;
}
return result;
- };
+ }
- void free_hash_map(struct hash_map * map) {
+ static void free_hash_map(struct hash_map * map) {
free(map);
}
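Two fixes are visible in this hunk, assuming typical clang/gcc warning flags: marking the file-local helpers static gives them internal linkage, so -Wmissing-prototypes no longer asks for declarations in a header, and the empty parameter list becomes the strict-prototype form (void); separately, the '};' that becomes '}' drops a stray semicolon after a function body, which -pedantic flags as an extra ';'. A standalone sketch with invented names:

#include <stdlib.h>

struct toy_map {
    int key;
    int val;
};

/* 'static' limits the helper to this translation unit, which is what
   -Wmissing-prototypes expects when no header declares it; '(void)'
   spells out that the function takes no arguments. */
static struct toy_map * new_toy_map(void) {
    struct toy_map * m = malloc(sizeof(*m));
    m->key = 0;
    m->val = 0;
    return m;
} /* note: no ';' after the closing brace of a function definition */

static void free_toy_map(struct toy_map * m) {
    free(m);
}

int main(void) {
    struct toy_map * m = new_toy_map();
    free_toy_map(m);
    return 0;
}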
@@ -19176,7 +19175,6 @@ static enum ggml_opt_result linesearch_backtracking(
float * step,
const float * xp,
struct ggml_tensor * f,
- struct ggml_cgraph * gf,
struct ggml_cgraph * gb,
struct ggml_cplan * cplan,
const int np,
@@ -19421,7 +19419,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
ggml_vec_cpy_f32(nx, xp, x);
ggml_vec_cpy_f32(nx, gp, g);
- ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gf, gb, &cplan, np, ps, callback, callback_data);
+ ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, callback, callback_data);
if (ls < 0) {
// linesearch failed - go back to the previous point and return
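
The last two hunks belong together: the gf parameter of linesearch_backtracking is never used inside the function, which -Wunused-parameter reports, so the parameter is dropped and the call in ggml_opt_lbfgs is updated to pass one argument fewer. A minimal sketch of the same cleanup, with invented names; when a parameter has to stay for interface reasons, a '(void)param;' cast is the usual alternative:

#include <stdio.h>

/* before: static int backtrack(float step, void * unused_graph, int max_iter)
   'unused_graph' was never read, so -Wunused-parameter fired on it. */
static int backtrack(float step, int max_iter) {
    int i = 0;
    while (i < max_iter && step > 1e-3f) {
        step *= 0.5f; /* shrink the step, as a backtracking line search does */
        ++i;
    }
    return i;
}

int main(void) {
    /* the call site drops the removed argument to match the new signature */
    printf("iterations: %d\n", backtrack(1.0f, 16));
    return 0;
}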