add more automatic names to view ops
parent
0cc5c5325c
commit
b19334ec76
1 changed file with 17 additions and 1 deletion
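The view-style ops touched below now call ggml_format_name() so that views, reshapes, permutes, transposes, cont and copy results automatically inherit a readable name from their source tensor, which mainly helps when reading graph dumps and debug output. As a rough illustration only (a hypothetical stand-alone sketch, not part of this commit, written against the public ggml.h API at this revision; the tensor name "x" is made up), the derived names look like this:

// Illustration only (not part of this commit): a minimal, hypothetical
// program against the public ggml.h API showing the names that the view
// ops below now assign automatically. The source tensor name "x" is arbitrary.
#include <stdio.h>
#include "ggml.h"

int main(void) {
    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024, // small arena, plenty for this demo
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // source tensor with an explicit name
    struct ggml_tensor * x = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 4, 3);
    ggml_set_name(x, "x");

    // each view-like op derives its name from its source tensor
    struct ggml_tensor * v = ggml_view_1d(ctx, x, 12, 0);      // "x (view)"
    struct ggml_tensor * r = ggml_reshape_2d(ctx, x, 3, 4);    // "x (reshaped)"
    struct ggml_tensor * t = ggml_transpose(ctx, x);           // "x (transposed)"
    struct ggml_tensor * p = ggml_permute(ctx, x, 1, 0, 2, 3); // "x (permuted)"
    struct ggml_tensor * c = ggml_cont(ctx, t);                // "x (transposed) (cont)"

    printf("%s\n%s\n%s\n%s\n%s\n", v->name, r->name, t->name, p->name, c->name);

    ggml_free(ctx);
    return 0;
}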
ggml.c (17 additions, 1 deletion)
@@ -4747,7 +4747,6 @@ struct ggml_tensor * ggml_view_tensor(
         struct ggml_context * ctx,
         const struct ggml_tensor * src) {
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, src->type, src->n_dims, src->ne, src->data);
-
     ggml_format_name(result, "%s (view)", src->name);
 
     result->nb[0] = src->nb[0];
@@ -5910,6 +5909,11 @@ struct ggml_tensor * ggml_cpy_impl(
 
     // make a view of the destination
     struct ggml_tensor * result = ggml_view_tensor(ctx, b);
+    if (strlen(a->name) > 0) {
+        ggml_format_name(result, "%s (copy of %s)", a->name, b->name);
+    } else {
+        ggml_format_name(result, "%s (copy)", b->name);
+    }
 
     result->op = GGML_OP_CPY;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -5946,6 +5950,7 @@ struct ggml_tensor * ggml_cont_impl(
     }
 
     struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
+    ggml_format_name(result, "%s (cont)", a->name);
 
     result->op = GGML_OP_CONT;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -5989,6 +5994,7 @@ struct ggml_tensor * ggml_reshape(
     }
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, b->n_dims, b->ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6013,6 +6019,7 @@ struct ggml_tensor * ggml_reshape_1d(
 
     const int64_t ne[1] = { ne0 };
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6038,6 +6045,7 @@ struct ggml_tensor * ggml_reshape_2d(
 
     const int64_t ne[2] = { ne0, ne1 };
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6064,6 +6072,7 @@ struct ggml_tensor * ggml_reshape_3d(
 
     const int64_t ne[3] = { ne0, ne1, ne2 };
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6092,6 +6101,7 @@ struct ggml_tensor * ggml_reshape_4d(
 
     const int64_t ne[4] = { ne0, ne1, ne2, ne3 };
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, a->data);
+    ggml_format_name(result, "%s (reshaped)", a->name);
 
     result->op = GGML_OP_RESHAPE;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
@@ -6116,6 +6126,7 @@ struct ggml_tensor * ggml_view_1d(
     }
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 1, &ne0, (char *) a->data + offset);
+    ggml_format_name(result, "%s (view)", a->name);
 
     ggml_scratch_save(ctx);
 
@@ -6153,6 +6164,7 @@ struct ggml_tensor * ggml_view_2d(
     const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, 1, 1 };
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 2, ne, (char *) a->data + offset);
+    ggml_format_name(result, "%s (view)", a->name);
 
     ggml_scratch_save(ctx);
 
@@ -6196,6 +6208,7 @@ struct ggml_tensor * ggml_view_3d(
     const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, 1 };
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 3, ne, (char *) a->data + offset);
+    ggml_format_name(result, "%s (view)", a->name);
 
     ggml_scratch_save(ctx);
 
@@ -6241,6 +6254,7 @@ struct ggml_tensor * ggml_view_4d(
     const int64_t ne[GGML_MAX_DIMS] = { ne0, ne1, ne2, ne3 };
 
     struct ggml_tensor * result = ggml_new_tensor_impl(ctx, a->type, 4, ne, (char *) a->data + offset);
+    ggml_format_name(result, "%s (view)", a->name);
 
     ggml_scratch_save(ctx);
 
@@ -6291,6 +6305,7 @@ struct ggml_tensor * ggml_permute(
     }
 
     struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+    ggml_format_name(result, "%s (permuted)", a->name);
 
     int ne[GGML_MAX_DIMS];
     int nb[GGML_MAX_DIMS];
@@ -6350,6 +6365,7 @@ struct ggml_tensor * ggml_transpose(
     }
 
     struct ggml_tensor * result = ggml_view_tensor(ctx, a);
+    ggml_format_name(result, "%s (transposed)", a->name);
 
     result->ne[0] = a->ne[1];
     result->ne[1] = a->ne[0];