change name of GGML_OP_ADD_AT to GGML_OP_ACC

xaedes 2023-05-07 21:14:57 +02:00
parent e0de09d77e
commit 4764842120
3 changed files with 36 additions and 36 deletions
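For orientation, here is a minimal usage sketch of the renamed entry point, assuming the post-commit API (ggml_acc as declared in ggml.h below). The surrounding setup is illustrative and not part of this commit; ggml_acc(ctx, a, b, nb1, nb2, nb3, offset) returns a copy of a in which the region viewed with byte strides nb1..nb3 at byte offset `offset` has had b added elementwise:

    // hypothetical example, not part of the diff: accumulate b into a
    // strided region of a copy of a
    #include "ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        struct ggml_tensor * a = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
        struct ggml_tensor * b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4);
        ggml_set_f32(a, 1.0f);
        ggml_set_f32(b, 2.0f);

        // add b into elements a[2..5]; a is contiguous, so its own
        // strides describe the view
        const size_t offset = 2*ggml_element_size(a);
        struct ggml_tensor * f = ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], offset);

        struct ggml_cgraph gf = ggml_build_forward(f);
        ggml_graph_compute(ctx, &gf);
        // ggml_get_f32_1d(f, i) now yields 1,1,3,3,3,3,1,1

        ggml_free(ctx);
        return 0;
    }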

ggml.c (42 changed lines)

@@ -3960,7 +3960,7 @@ static const char * GGML_OP_LABEL[GGML_OP_COUNT] = {
     "DUP",
     "ADD",
     "ADD1",
-    "ADD_AT",
+    "ACC",
     "SUB",
     "MUL",
     "DIV",
@@ -4020,7 +4020,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "x",
     "x+y",
     "x+y",
-    "x[offset:]+y",
+    "view(x,nb,offset)+=y->x",
     "x-y",
     "x*y",
     "x/y",
@@ -5054,9 +5054,9 @@ struct ggml_tensor * ggml_add1_inplace(
     return ggml_add1_impl(ctx, a, b, true);
 }
 
-// ggml_add_at
+// ggml_acc
 
-struct ggml_tensor * ggml_add_at_impl(
+struct ggml_tensor * ggml_acc_impl(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
@@ -5084,7 +5084,7 @@ struct ggml_tensor * ggml_add_at_impl(
     ((int32_t *) c->data)[3] = offset;
     ((int32_t *) c->data)[4] = inplace ? 1 : 0;
 
-    result->op   = GGML_OP_ADD_AT;
+    result->op   = GGML_OP_ACC;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src0 = a;
     result->src1 = b;
@@ -5093,7 +5093,7 @@ struct ggml_tensor * ggml_add_at_impl(
     return result;
 }
 
-struct ggml_tensor * ggml_add_at(
+struct ggml_tensor * ggml_acc(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
@@ -5101,10 +5101,10 @@ struct ggml_tensor * ggml_add_at(
         size_t nb2,
         size_t nb3,
         size_t offset) {
-    return ggml_add_at_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
+    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, false);
 }
 
-struct ggml_tensor * ggml_add_at_inplace(
+struct ggml_tensor * ggml_acc_inplace(
         struct ggml_context * ctx,
         struct ggml_tensor * a,
         struct ggml_tensor * b,
@@ -5112,7 +5112,7 @@ struct ggml_tensor * ggml_add_at_inplace(
         size_t nb2,
         size_t nb3,
         size_t offset) {
-    return ggml_add_at_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
+    return ggml_acc_impl(ctx, a, b, nb1, nb2, nb3, offset, true);
 }
 
 // ggml_sub
@@ -8215,9 +8215,9 @@ static void ggml_compute_forward_add1(
 }
 
-// ggml_compute_forward_add_at
+// ggml_compute_forward_acc
 
-static void ggml_compute_forward_add_at_f32(
+static void ggml_compute_forward_acc_f32(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
         const struct ggml_tensor * src1,
@@ -8229,7 +8229,7 @@ static void ggml_compute_forward_add_at_f32(
     GGML_ASSERT(opt0->type == GGML_TYPE_I32);
     GGML_ASSERT(ggml_nelements(opt0) == 5);
 
-    // view src0 and dst with these strides and data offset inbytes during add_at
+    // view src0 and dst with these strides and data offset inbytes during acc
     // nb0 is implicitely element_size because src0 and dst are contiguous
     size_t nb1 = ((int32_t *) opt0->data)[0];
     size_t nb2 = ((int32_t *) opt0->data)[1];
@@ -8266,7 +8266,7 @@ static void ggml_compute_forward_add_at_f32(
     const size_t nb12 = src1->nb[2];
     const size_t nb13 = src1->nb[3];
 
-    // src0 and dst as viewed during add_at
+    // src0 and dst as viewed during acc
     const size_t nb0 = ggml_element_size(src0);
 
     const size_t nb00 = nb0;
@@ -8307,7 +8307,7 @@ static void ggml_compute_forward_add_at_f32(
     }
 }
 
-static void ggml_compute_forward_add_at(
+static void ggml_compute_forward_acc(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
         const struct ggml_tensor * src1,
@@ -8317,7 +8317,7 @@ static void ggml_compute_forward_add_at(
     switch (src0->type) {
         case GGML_TYPE_F32:
             {
-                ggml_compute_forward_add_at_f32(params, src0, src1, opt0, dst);
+                ggml_compute_forward_acc_f32(params, src0, src1, opt0, dst);
             } break;
         case GGML_TYPE_F16:
         case GGML_TYPE_Q4_0:
@@ -13168,9 +13168,9 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
             {
                 ggml_compute_forward_add1(params, tensor->src0, tensor->src1, tensor);
             } break;
-        case GGML_OP_ADD_AT:
+        case GGML_OP_ACC:
             {
-                ggml_compute_forward_add_at(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
+                ggml_compute_forward_acc(params, tensor->src0, tensor->src1, tensor->opt[0], tensor);
             } break;
         case GGML_OP_SUB:
             {
@@ -13404,7 +13404,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                         inplace);
                 }
             } break;
-        case GGML_OP_ADD_AT:
+        case GGML_OP_ACC:
             {
                 if (src0->grad) {
                     src0->grad = ggml_add_impl(ctx, src0->grad, tensor->grad, inplace);
@@ -13767,7 +13767,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                 if (src0->grad) {
                     src0->grad = ggml_add_impl(ctx,
                         src0->grad,
-                        ggml_add_at_impl(ctx,
+                        ggml_acc_impl(ctx,
                             tensor->grad,
                             ggml_neg(ctx, tensor_grad_view),
                             nb1, nb2, nb3, offset, false),
@@ -13848,7 +13848,7 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                     nb3 = (nb3 / n0) * ng;
                 }
 
-                src0->grad = ggml_add_at_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
+                src0->grad = ggml_acc_impl(ctx, src0->grad, tensor->grad, nb1, nb2, nb3, offset, inplace);
             }
         } break;
         case GGML_OP_PERMUTE:
@@ -14394,7 +14394,7 @@ void ggml_graph_compute(struct ggml_context * ctx, struct ggml_cgraph * cgraph)
                         work_size = MAX(work_size, cur);
                     } break;
-                case GGML_OP_ADD_AT:
+                case GGML_OP_ACC:
                    {
                        node->n_tasks = n_threads;
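The GGML_OP_SYMBOL string above, "view(x,nb,offset)+=y->x", is a compact statement of the forward semantics that ggml_compute_forward_acc_f32 implements. A plain-C sketch of the same idea, independent of the ggml internals (all names and the signature here are illustrative, not part of the diff):

    #include <stdint.h>
    #include <string.h>

    // sketch of the acc forward pass: out = x, then the region of out
    // viewed with byte strides nb1/nb2/nb3 at byte offset `offset`
    // (nb0 is implicitly the element size; x and out are contiguous)
    // gets the contiguous tensor y, of shape ne1[0..3], added elementwise
    static void acc_f32_sketch(
            float * out, const float * x, size_t x_bytes,
            const float * y, const int64_t ne1[4],
            size_t nb1, size_t nb2, size_t nb3, size_t offset) {
        memcpy(out, x, x_bytes); // the non-inplace form copies src0 first
        for (int64_t i3 = 0; i3 < ne1[3]; i3++) {
            for (int64_t i2 = 0; i2 < ne1[2]; i2++) {
                for (int64_t i1 = 0; i1 < ne1[1]; i1++) {
                    // byte address of this row inside the strided view of out
                    char * row = (char *) out + offset + i3*nb3 + i2*nb2 + i1*nb1;
                    // matching contiguous row of y
                    const float * src = y + ((i3*ne1[2] + i2)*ne1[1] + i1)*ne1[0];
                    for (int64_t i0 = 0; i0 < ne1[0]; i0++) {
                        ((float *) row)[i0] += src[i0];
                    }
                }
            }
        }
    }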

ggml.h (6 changed lines)

@@ -253,7 +253,7 @@ extern "C" {
         GGML_OP_DUP,
         GGML_OP_ADD,
         GGML_OP_ADD1,
-        GGML_OP_ADD_AT,
+        GGML_OP_ACC,
         GGML_OP_SUB,
         GGML_OP_MUL,
         GGML_OP_DIV,
@@ -496,7 +496,7 @@ extern "C" {
             struct ggml_tensor * a,
             struct ggml_tensor * b);
 
-    GGML_API struct ggml_tensor * ggml_add_at(
+    GGML_API struct ggml_tensor * ggml_acc(
             struct ggml_context * ctx,
             struct ggml_tensor * a,
             struct ggml_tensor * b,
@@ -505,7 +505,7 @@ extern "C" {
             size_t nb3,
             size_t offset);
 
-    GGML_API struct ggml_tensor * ggml_add_at_inplace(
+    GGML_API struct ggml_tensor * ggml_acc_inplace(
             struct ggml_context * ctx,
             struct ggml_tensor * a,
             struct ggml_tensor * b,

tests/test-grad0.c (24 changed lines)

@@ -697,7 +697,7 @@ int main(int argc, const char ** argv) {
             }
         }
 
-        // add_at 1d
+        // acc 1d
         {
             int64_t ne2[4] = { 1, 1, 1, 1 };
@@ -718,13 +718,13 @@ int main(int argc, const char ** argv) {
                 const int max_offset = MAX(0, ggml_nelements(x[0]) - ggml_nelements(x[1]));
                 const int offset = irand(max_offset) * ggml_element_size(x[0]);
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_add_at(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_acc(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
 
-                check_gradient("add_at 1d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
+                check_gradient("acc 1d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
         }
 
-        // add_at 2d
+        // acc 2d
         {
             int64_t ne2[4] = { 1, 1, 1, 1 };
             int64_t max_offsets[4] = { 0, 0, 0, 0 };
@@ -750,13 +750,13 @@ int main(int argc, const char ** argv) {
                 offsets[1] = irand(max_offsets[1]) * x[0]->nb[1];
                 const int offset = offsets[0] + offsets[1];
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_add_at(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_acc(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
 
-                check_gradient("add_at 2d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
+                check_gradient("acc 2d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
         }
 
-        // add_at 3d
+        // acc 3d
         {
             int64_t ne2[4] = { 1, 1, 1, 1 };
             int64_t max_offsets[4] = { 0, 0, 0, 0 };
@@ -784,13 +784,13 @@ int main(int argc, const char ** argv) {
                 offsets[2] = irand(max_offsets[2]) * x[0]->nb[2];
                 const int offset = offsets[0] + offsets[1] + offsets[2];
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_add_at(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_acc(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
 
-                check_gradient("add_at 3d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
+                check_gradient("acc 3d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
         }
 
-        // add_at 4d
+        // acc 4d
         {
             int64_t ne2[4] = { 1, 1, 1, 1 };
             int64_t max_offsets[4] = { 0, 0, 0, 0 };
@@ -820,9 +820,9 @@ int main(int argc, const char ** argv) {
                 offsets[3] = irand(max_offsets[3]) * x[0]->nb[3];
                 const int offset = offsets[0] + offsets[1] + offsets[2] + offsets[3];
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_add_at(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_acc(ctx0, x[0], x[1], x[0]->nb[1], x[0]->nb[2], x[0]->nb[3], offset));
 
-                check_gradient("add_at 4d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
+                check_gradient("acc 4d", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
         }
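The check_gradient calls above compare the analytic gradients of acc against finite differences. Because acc passes x through unchanged and adds each element of y into the output exactly once, both df/dx and df/dy for f = sum(acc(x, y, ...)) are all ones. A sketch of the same check done by hand, reusing the ctx/a/b setup from the example near the top; the ggml_build_backward and ggml_graph_reset calls are assumed to have their signatures from this era of ggml:

    // assumes ctx, a, b initialized as in the earlier sketch
    ggml_set_param(ctx, a);
    ggml_set_param(ctx, b);

    struct ggml_tensor * f = ggml_sum(ctx,
            ggml_acc(ctx, a, b, a->nb[1], a->nb[2], a->nb[3], 2*ggml_element_size(a)));

    struct ggml_cgraph gf = ggml_build_forward(f);
    struct ggml_cgraph gb = ggml_build_backward(ctx, &gf, false);

    ggml_graph_compute(ctx, &gf);
    ggml_graph_reset(&gf);
    ggml_set_f32(f->grad, 1.0f);
    ggml_graph_compute(ctx, &gb);
    // a->grad and b->grad now hold all ones, matching the numeric check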