add debug code
parent f31b6f4e2d
commit 3ec51c035c

2 changed files with 44 additions and 1 deletion

ggml.c: 42 changed lines
@@ -9658,6 +9658,14 @@ static void ggml_compute_forward_div_f32(
             }
         }
     }
+    printf("lj.ggml_compute_forward_mul = %f", *(float*)dst->data);
+
+    for (int i = 1; i < 3; i++) {
+
+        printf(" %f", ((float*)dst->data)[i]);
+
+    }
+    printf("\n");
 }

 static void ggml_compute_forward_div(
@@ -10897,6 +10905,14 @@ static void ggml_compute_forward_rms_norm_f32(
             }
         }
     }
+    printf("lj.ggml_compute_forward_rms = %f", *(float*)dst->data);
+
+    for (int i = 1; i < 3; i++) {
+
+        printf(" %f", ((float*)dst->data)[i]);
+
+    }
+    printf("\n");
 }

 static void ggml_compute_forward_rms_norm(
@@ -11423,6 +11439,14 @@ static void ggml_compute_forward_mul_mat(
             }
         }
     }
+    printf("lj.ggml_compute_forward_mul_mat = %f", *(float*)dst->data);
+
+    for (int i = 1; i < 3; i++) {
+
+        printf(" %f", ((float*)dst->data)[i]);
+
+    }
+    printf("\n");
 }

 // ggml_compute_forward_out_prod
@@ -12256,6 +12280,14 @@ static void ggml_compute_forward_soft_max_f32(
         }
 #endif
     }
+    printf("lj.ggml_compute_forward_softmax = %f", *(float*)dst->data);
+
+    for (int i = 1; i < 3; i++) {
+
+        printf(" %f", ((float*)dst->data)[i]);
+
+    }
+    printf("\n");
 }

 static void ggml_compute_forward_soft_max(
@@ -12744,9 +12776,19 @@ static void ggml_compute_forward_rope_f32(
                 }
             }
         }
+
     }
+    printf("lj.ggml_compute_forward_rope = %f", *(float*)dst->data);
+
+    for (int i = 1; i < 3; i++) {
+
+        printf(" %f", ((float*)dst->data)[i]);
+
+    }
+    printf("\n");
 }
+

 static void ggml_compute_forward_rope_f16(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
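The same three-element dump is pasted into every ggml.c hunk above. As a minimal sketch only (not part of this commit), the repeated block could be factored into a single helper; the name lj_debug_print_f32 and the assumption that dst holds contiguous F32 data are hypothetical:

// Hypothetical helper, not part of this commit: prints the first n values of a
// tensor assumed to hold contiguous F32 data, matching the printf pattern above.
#include <stdio.h>
#include "ggml.h"

static void lj_debug_print_f32(const char * name, const struct ggml_tensor * dst, int n) {
    const float * d = (const float *) dst->data;
    printf("lj.%s = %f", name, d[0]);
    for (int i = 1; i < n; i++) {
        printf(" %f", d[i]);
    }
    printf("\n");
}

// example call at the end of ggml_compute_forward_mul_f32:
//     lj_debug_print_f32("ggml_compute_forward_mul", dst, 3);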
llama.cpp: 3 changed lines

@@ -1966,7 +1966,7 @@ static void llm_load_tensors(

        model.layers.resize(n_layer);

-        for (uint32_t i = 0; i < n_layer; ++i) {
+        for (uint32_t i = 0; i < 1; ++i) {
            const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD; // NOLINT
            const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT; // NOLINT

@@ -2515,6 +2515,7 @@ static struct ggml_cgraph * llm_build_llama(

         // input for next layer
         inpL = cur;
+        printf(" Final inpL = %f \n", *(float*)inpL->data);
     }

     cur = inpL;
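The llm_load_tensors hunk above hardcodes the layer loop to a single iteration. As a hypothetical sketch only (none of this is in the commit), the cap could instead come from an environment variable so the debug path is opt-in; the variable name LJ_DEBUG_N_LAYER and the helper below are assumptions:

// Hypothetical sketch, not part of this commit: cap the number of layers to
// load via an environment variable instead of hardcoding "i < 1".
#include <stdint.h>
#include <stdlib.h>

static uint32_t lj_debug_layer_cap(uint32_t n_layer) {
    const char * s = getenv("LJ_DEBUG_N_LAYER");   // hypothetical variable name
    if (s == NULL) {
        return n_layer;                            // default: load every layer
    }
    uint32_t cap = (uint32_t) strtoul(s, NULL, 10);
    return (cap > 0 && cap < n_layer) ? cap : n_layer;
}

// example use in llm_load_tensors:
//     for (uint32_t i = 0; i < lj_debug_layer_cap(n_layer); ++i) { ... }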