Cleanup
parent 308c817af4
commit 92ff0de243

3 changed files with 0 additions and 177 deletions
@@ -2916,12 +2916,8 @@ class OpenELM(Model):
        head_dim = self.find_hparam(["head_dim"])
        n_head = n_embd // head_dim
        rot_pct = 1.0

        self.gguf_writer.add_context_length(self.find_hparam(["max_context_length"]))
        # self.gguf_writer.add_embedding_length(n_embd)
        self.gguf_writer.add_block_count(self.block_count)
        # self.gguf_writer.add_head_count(n_head)
        # self.gguf_writer.add_head_count_kv(n_head)
        self.gguf_writer.add_head_count_kv(n_head*10)
        self.gguf_writer.add_head_count(n_head*10)
        self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head)
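A note on the hunk above: with rot_pct = 1.0, the expression int(rot_pct * n_embd) // n_head is simply head_dim (under hypothetical OpenELM-style values n_embd = 1280 and head_dim = 64, n_head would be 20 and the rope dimension count 64), and the n_head*10 arguments to add_head_count/add_head_count_kv read as leftover debugging overrides rather than real hyperparameters.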
@@ -2979,8 +2975,6 @@ class OpenELM(Model):
    def write_tensors(self):
        block_count = self.hparams.get("num_transformer_layers", self.hparams.get("num_hidden_layers", self.hparams.get("num_transformer_layers")))
        tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
        n_head = self.hparams.get("model_dim") // self.hparams.get("head_dim") # TODO: propagate this
        foobar = {}
        for name, data_torch in self.get_tensors():
            old_dtype = data_torch.dtype
            # convert any unsupported data types to float32

@@ -3006,8 +3000,6 @@ class OpenELM(Model):
                data = data.astype(np.float16)
            print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
            self.gguf_writer.add_tensor(new_name, data)
            foobar[name] = (data_torch, new_name, data)
        foobar


###### CONVERSION LOGIC ######
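In write_tensors above, the bare foobar expression is a no-op statement, and the foobar dict only keeps (data_torch, new_name, data) triples around so a debugger can inspect the converted tensors; neither affects the GGUF output. The block_count lookup is also redundant: its final fallback repeats the first key, num_transformer_layers.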
@@ -38,115 +38,6 @@ static std::string ggml_nb_string(const ggml_tensor * t) {
    return str;
}


static void ggml_print_tensor(uint8_t * data, ggml_type type, const int64_t * ne, const size_t * nb, int64_t n) {
    GGML_ASSERT(n > 0);
    float sum = 0;

    for (int64_t i0 = 0; i0 < 3; i0++) {
        if (i0 == n && ne[0] > 2*n) {
            printf("..., ");
            i0 = ne[0] - n;
        }
        size_t i = i0; //i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + i0 * nb[0];
        float v;
        if (type == GGML_TYPE_F16) {
            v = ggml_fp16_to_fp32(*(ggml_fp16_t *) data + i);
        } else if (type == GGML_TYPE_F32) {
            v = *(float *) data + i;
        } else if (type == GGML_TYPE_I32) {
            v = (float) *((int32_t *) data + i);
        } else if (type == GGML_TYPE_I16) {
            v = (float) *(int16_t *) data + i;
        } else if (type == GGML_TYPE_I8) {
            v = (float) *(int8_t *) data + i;
        } else {
            GGML_ASSERT(false);
        }
        printf("%12.4f", v);
        sum += v;
    }
    printf("\n");

    // for (int64_t i3 = 0; i3 < ne[3]; i3++) {
    //     printf(" [\n");
    //     for (int64_t i2 = 0; i2 < ne[2]; i2++) {
    //         if (i2 == n && ne[2] > 2*n) {
    //             printf(" ..., \n");
    //             i2 = ne[2] - n;
    //         }
    //         printf(" [\n");
    //         for (int64_t i1 = 0; i1 < ne[1]; i1++) {
    //             if (i1 == n && ne[1] > 2*n) {
    //                 printf(" ..., \n");
    //                 i1 = ne[1] - n;
    //             }
    //             printf(" [");
    //             for (int64_t i0 = 0; i0 < ne[0]; i0++) {
    //                 if (i0 == n && ne[0] > 2*n) {
    //                     printf("..., ");
    //                     i0 = ne[0] - n;
    //                 }
    //                 size_t i = i0; //i3 * nb[3] + i2 * nb[2] + i1 * nb[1] + i0 * nb[0];
    //                 float v;
    //                 if (type == GGML_TYPE_F16) {
    //                     v = ggml_fp16_to_fp32(*(ggml_fp16_t *) data + i);
    //                 } else if (type == GGML_TYPE_F32) {
    //                     v = *(float *) data + i;
    //                 } else if (type == GGML_TYPE_I32) {
    //                     v = (float) *((int32_t *) data + i);
    //                 } else if (type == GGML_TYPE_I16) {
    //                     v = (float) *(int16_t *) data + i;
    //                 } else if (type == GGML_TYPE_I8) {
    //                     v = (float) *(int8_t *) data + i;
    //                 } else {
    //                     GGML_ASSERT(false);
    //                 }
    //                 printf("%12.4f", v);
    //                 sum += v;
    //                 if (i0 < ne[0] - 1) printf(", ");
    //             }
    //             printf("],\n");
    //         }
    //         printf(" ],\n");
    //     }
    //     printf(" ]\n");
    //     printf(" sum = %f\n", sum);
    // }
}

float Sum(float *arr, int64_t N){
    float s = 0.0;
    for (int i = 0; i < N; i++){
        s += arr[i];
    }
    return s;
}
float PrintArr(const char * name, float * arr, int64_t N){
    float sum = 0.0;
    if (arr != NULL){
        sum = Sum(arr, N);
        printf("%s %d %10f \n", name, N, sum);
    } else {
        printf("%s %d %10f \n", name, 0, 0.0);
    }
    return sum;
}

size_t get_nth_element(const int64_t *ne, const size_t *nb, int64_t nth) {
    size_t offset = 0;
    size_t divisor = 1;
    for (int i = 3; i >= 0; --i) {
        size_t index = size_t(floor(nth / divisor)) % ne[i];
        offset += index * nb[i]/4;
        divisor *= ne[i];
    }
    return offset;
}

void print_tensor(const ggml_tensor * src0) {
    float sum = 0;

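Two of the helpers deleted above carry real bugs, which is presumably why the cleanup drops them. In ggml_print_tensor, the expression *(ggml_fp16_t *) data + i (and its F32/I16/I8 siblings) binds the cast and dereference before the addition, so it reads element 0 and then adds i to the value instead of indexing element i. In get_nth_element, the loop walks from ne[3] down although ggml's dimension 0 is the fastest-varying one, and nb[i]/4 hardcodes a 4-byte element size even though nb[] is already a byte stride. A minimal corrected sketch (mine, not part of the commit):

    static float read_f32(const uint8_t * data, size_t i) {
        // parenthesize first: advance the typed pointer, then dereference
        return *((const float *) data + i);
    }

    static size_t flat_index_to_offset(const int64_t * ne, const size_t * nb, int64_t nth) {
        // nb[] is already a stride in bytes and dimension 0 varies fastest,
        // so the divisor must grow from ne[0] upward
        size_t  offset  = 0;
        int64_t divisor = 1;
        for (int i = 0; i < 4; ++i) {
            const int64_t index = (nth / divisor) % ne[i];
            offset += (size_t) index * nb[i];
            divisor *= ne[i];
        }
        return offset;
    }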
@@ -164,8 +55,6 @@ void print_tensor(const ggml_tensor * src0) {
        if (i == n) {
            buf2 += sprintf(buf2, "..., ");
        }
        // int64_t offset = get_nth_element(src0->ne, src0->nb, i);
        // offset *= ggml_element_size(src0);
        int64_t offset = i;
        float v;
        if (type == GGML_TYPE_F16) {
@@ -186,17 +75,6 @@ void print_tensor(const ggml_tensor * src0) {
        }
        sum += v;
    }

    int i = 0;
    while (i < ggml_nbytes(src0)/4){
        float val = (((float *) src0->data)[i]);
        float diff = abs(val - 0.0022226818837225437164306640625);
        if (diff < 0.000001 ){
            printf("found %s: %d = %f\n", src0->name, i, val);
        }
        i += 1;
    }

    int max_name_length = 15;
    int max_dim_length = 15;
    int max_str_length = 15;
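The needle scan in the hunk above (searching every float in a tensor for one known activation value) is a handy debugging trick, but unqualified abs() on a float argument can resolve to the integer overload and truncate diff before the comparison unless the <cmath> overloads are in scope. A hedged standalone rewrite, with a hypothetical helper name:

    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    // scan a float buffer for a known activation value
    static void scan_for_value(const char * name, const float * data, int64_t n, float needle) {
        for (int64_t i = 0; i < n; ++i) {
            if (fabsf(data[i] - needle) < 1e-6f) {
                printf("found %s: %lld = %f\n", name, (long long) i, data[i]);
            }
        }
    }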
@@ -206,10 +84,6 @@ void print_tensor(const ggml_tensor * src0) {
        max_dim_length, ggml_ne_string(src0).c_str(),
        max_str_length, ggml_nb_string(src0).c_str(),
        src0->data);

    // printf("%s\n", buf);
}

/**
@@ -231,50 +105,19 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {
    if (ask) {
        return true; // Always retrieve data
    }

    char src1_str[128] = {0};
    // if (src1) {
    //     sprintf(src1_str, "%s{%s}\n", src1->name, ggml_ne_string(src1).c_str());
    // }

    if (src0) {
        print_tensor(src0);
        // printf("%s{%s} n=%d %f\n", src0->name, ggml_ne_string(src0).c_str(), src0->ne[0], Sum(static_cast<float *>(src0->data), src0->ne[0]));
        // printf("%s{%s}", src0->name, ggml_ne_string(src0).c_str());
        // enum ggml_type type = src0->name == "inp_tokens" ? GGML_TYPE_I32 : src0->type;
        // ggml_print_tensor(static_cast<uint8_t *>(src0->data), src0->type, src0->ne, src0->nb, 3);
        // PrintArr(src0->name, static_cast<float *>(src0->data), src0->ne[0]);
    }
    if (src1) {
        print_tensor(src1);
        // printf("%s{%s} n=%d %f\n", src1->name, ggml_ne_string(src1).c_str(), src0->ne[0], Sum(static_cast<float *>(src1->data), src1->ne[0]));
        // enum ggml_type type = src1->name == "inp_tokens" ? GGML_TYPE_I32 : src1->type;
        // ggml_print_tensor(static_cast<uint8_t *>(src1->data), type, src1->ne, src1->nb, 3);
        // ggml_print_tensor(static_cast<uint8_t *>(src1->data), src1->type, src1->ne, src1->nb, 3);
        // PrintArr(src1->name, static_cast<float *>(src1->data), src1->ne[0]);
    }
    printf("%s ==\n", ggml_op_desc(t));
    if (t) {
        print_tensor(t);
        // printf("%s{%s} n=%d %f\n", t->name, ggml_ne_string(t).c_str(), src0->ne[0], Sum(static_cast<float *>(t->data), t->ne[0]));
        // printf("%s{%s}", t->name, ggml_ne_string(t).c_str());
        // PrintArr(t->name, static_cast<float *>(t->data), t->ne[0]);
        // ggml_print_tensor(static_cast<uint8_t *>(t->data), t->type, t->ne, t->nb, 3);
        // printf("\n == \n");
    }
    printf("\n\n");

    // printf("%24s = (%s) %10s(%s{%s}, %s}) = {%s}\n",
    //     t->name, ggml_op_desc(t), src0->name, ggml_ne_string(src0).c_str(),
    //     src1 ? src1_str : "",
    //     ggml_ne_string(t).c_str());

    // copy the data from the GPU memory if needed
    const bool is_host = ggml_backend_buffer_is_host(t->buffer);

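Stripped of all the printing above, what remains is the two-pass contract of a ggml scheduler eval callback: the runtime first calls it with ask == true to learn whether the node's data should be made available, then calls it again after the node is computed. A minimal sketch of that skeleton (my naming, not the example's):

    // return true on the "ask" pass to request the tensor data, then
    // report the tensor on the second pass; returning false aborts
    // graph evaluation
    static bool debug_cb(struct ggml_tensor * t, bool ask, void * user_data) {
        (void) user_data;
        if (ask) {
            return true;
        }
        printf("%s: %s, ne = [%lld, %lld, %lld, %lld]\n",
               t->name, ggml_op_desc(t),
               (long long) t->ne[0], (long long) t->ne[1],
               (long long) t->ne[2], (long long) t->ne[3]);
        return true;
    }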
@@ -286,7 +129,6 @@ static bool ggml_debug(struct ggml_tensor * t, bool ask, void * user_data) {

    if (!ggml_is_quantized(t->type)) {
        uint8_t * data = is_host ? (uint8_t *) t->data : cb_data->data.data();
        // ggml_print_tensor(data, t->type, t->ne, t->nb, 3);
    }

    return true;
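For completeness, the step the commit keeps: a tensor may live in device memory, so its bytes have to be copied into a host buffer before they can be read. A sketch of that pattern, with a local std::vector standing in for the example's cb_data member:

    // copy tensor data to the host if the backend buffer is not host-visible
    std::vector<uint8_t> buf;
    if (!ggml_backend_buffer_is_host(t->buffer)) {
        buf.resize(ggml_nbytes(t));
        ggml_backend_tensor_get(t, buf.data(), 0, ggml_nbytes(t));
    }
    const uint8_t * data = buf.empty() ? (const uint8_t *) t->data : buf.data();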
llama.cpp (11 deletions)
@@ -6955,17 +6955,6 @@ struct llm_build_context {
        return lctx.inp_KQ_mask;
    }

    struct ggml_tensor * build_inp_KQ_mask2(int64_t n_kv, bool causal = true) {
        if (causal) {
            lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, n_tokens);
        } else {
            lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
        }
        cb(lctx.inp_KQ_mask, "KQ_mask", -1);
        ggml_set_input(lctx.inp_KQ_mask);
        return lctx.inp_KQ_mask;
    }

    struct ggml_tensor * build_inp_KQ_pos() {
        lctx.inp_KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n_kv);
        cb(lctx.inp_KQ_pos, "KQ_pos", -1);
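The deleted build_inp_KQ_mask2 duplicated the existing build_inp_KQ_mask: causal attention gets an n_kv by n_tokens mask, non-causal an n_tokens by n_tokens one. As a hedged illustration of what such a mask holds (llama.cpp fills the real tensor elsewhere; this assumes the simple case where token j may attend to key positions 0..j):

    #include <math.h>

    // 0.0f where key position i is visible to query position j, -INFINITY
    // otherwise; n_kv is the fastest-varying dimension, matching
    // ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, n_tokens)
    static void fill_causal_mask(float * mask, int n_kv, int n_tokens) {
        for (int j = 0; j < n_tokens; ++j) {
            for (int i = 0; i < n_kv; ++i) {
                mask[j * n_kv + i] = (i <= j) ? 0.0f : -INFINITY;
            }
        }
    }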