add ggml API functions ggml_unravel_index, ggml_get_i32_nd and its analogs for set and for f32

ggml_get_i32_1d, ggml_set_i32_1d, ggml_get_f32_1d and ggml_set_f32_1d now support non-contiguous tensors.
for a non-contiguous tensor, the 1d index is unraveled into a multi-index with ggml_unravel_index and passed to the equivalent '_nd' function.

this fixes a bug in test-grad0 that surfaced because ggml_build_backward no longer builds purely contiguous tensors.
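
for reference, a minimal standalone sketch of the unraveling arithmetic (the shape and flat index below are invented for the demo; as in ggml, ne[0] is the fastest-varying dimension):

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        const int64_t ne[4] = { 4, 3, 2, 1 }; // assumed shape, ne[0] innermost
        const int64_t i     = 17;             // flat index to unravel

        // same arithmetic as ggml_unravel_index
        const int64_t i3 =  i / (ne[2]*ne[1]*ne[0]);
        const int64_t i2 = (i - i3*ne[2]*ne[1]*ne[0]) / (ne[1]*ne[0]);
        const int64_t i1 = (i - i3*ne[2]*ne[1]*ne[0] - i2*ne[1]*ne[0]) / ne[0];
        const int64_t i0 =  i - i3*ne[2]*ne[1]*ne[0] - i2*ne[1]*ne[0] - i1*ne[0];

        // round trip: i0 + ne[0]*(i1 + ne[1]*(i2 + ne[2]*i3)) == i
        printf("%lld -> (%lld, %lld, %lld, %lld)\n", (long long) i,
               (long long) i0, (long long) i1, (long long) i2, (long long) i3);
        return 0; // prints: 17 -> (1, 1, 1, 0)
    }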
xaedes 2023-08-29 20:59:31 +02:00
parent 5f0a4e971f
commit 82c5247a20
2 changed files with 180 additions and 0 deletions

ggml.c (+171)

@@ -4838,7 +4838,37 @@ struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
    return tensor;
}
void ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3) {
    const int64_t ne3 = tensor->ne[3];
    const int64_t ne2 = tensor->ne[2];
    const int64_t ne1 = tensor->ne[1];
    const int64_t ne0 = tensor->ne[0];

    const int64_t i3_ = (i/(ne2*ne1*ne0));
    const int64_t i2_ = (i - i3_*ne2*ne1*ne0)/(ne1*ne0);
    const int64_t i1_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0)/ne0;
    const int64_t i0_ = (i - i3_*ne2*ne1*ne0 - i2_*ne1*ne0 - i1_*ne0);

    if (i0) {
        *i0 = i0_;
    }
    if (i1) {
        *i1 = i1_;
    }
    if (i2) {
        *i2 = i2_;
    }
    if (i3) {
        *i3 = i3_;
    }
}
int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
    }

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
@@ -4875,6 +4905,12 @@ int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
}

void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
        return;
    }

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
@@ -4908,7 +4944,74 @@ void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
    }
}
int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                return ((int8_t *) data)[0];
            } break;
        case GGML_TYPE_I16:
            {
                return ((int16_t *) data)[0];
            } break;
        case GGML_TYPE_I32:
            {
                return ((int32_t *) data)[0];
            } break;
        case GGML_TYPE_F16:
            {
                return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
            } break;
        case GGML_TYPE_F32:
            {
                return ((float *) data)[0];
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return 0;
}
void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                ((int8_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I16:
            {
                ((int16_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I32:
            {
                ((int32_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_F16:
            {
                ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                ((float *)(data))[0] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
    }

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
@@ -4945,6 +5048,12 @@ float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
}

void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
    if (!ggml_is_contiguous(tensor)) {
        int64_t id[4] = { 0, 0, 0, 0 };
        ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
        ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
        return;
    }

    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
@@ -4978,6 +5087,68 @@ void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
    }
}
float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                return ((int8_t *) data)[0];
            } break;
        case GGML_TYPE_I16:
            {
                return ((int16_t *) data)[0];
            } break;
        case GGML_TYPE_I32:
            {
                return ((int32_t *) data)[0];
            } break;
        case GGML_TYPE_F16:
            {
                return GGML_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
            } break;
        case GGML_TYPE_F32:
            {
                return ((float *) data)[0];
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }

    return 0.0f;
}
void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
    void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
    switch (tensor->type) {
        case GGML_TYPE_I8:
            {
                ((int8_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I16:
            {
                ((int16_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_I32:
            {
                ((int32_t *)(data))[0] = value;
            } break;
        case GGML_TYPE_F16:
            {
                ((ggml_fp16_t *)(data))[0] = GGML_FP32_TO_FP16(value);
            } break;
        case GGML_TYPE_F32:
            {
                ((float *)(data))[0] = value;
            } break;
        default:
            {
                GGML_ASSERT(false);
            } break;
    }
}
void * ggml_get_data(const struct ggml_tensor * tensor) {
    return tensor->data;
}
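
for context, a usage sketch of what the contiguity check enables (the shape and values are invented for the demo, and the snippet assumes a ggml build from around this commit): a transposed view is non-contiguous, so the 1d accessors now unravel the flat index and dispatch to the '_nd' variants instead of indexing raw data directly.

    #include <stdio.h>
    #include "ggml.h"

    int main(void) {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16*1024*1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // 3x2 f32 tensor filled with 0..5 (contiguous path of the 1d setter)
        struct ggml_tensor * a = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 3, 2);
        for (int i = 0; i < 6; ++i) {
            ggml_set_f32_1d(a, i, (float) i);
        }

        // a transposed view shares the data but swaps strides -> non-contiguous
        struct ggml_tensor * t = ggml_transpose(ctx, a);

        // the 1d getter now unravels the flat index over t's shape and
        // dispatches to ggml_get_f32_nd instead of assuming contiguous storage
        for (int i = 0; i < 6; ++i) {
            printf("t[%d] = %f\n", i, ggml_get_f32_1d(t, i));
        }

        ggml_free(ctx);
        return 0;
    }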

ggml.h (+9)

@@ -671,12 +671,21 @@ extern "C" {
    GGML_API struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value);
    GGML_API struct ggml_tensor * ggml_set_f32 (struct ggml_tensor * tensor, float value);

    // Converts a flat index into coordinates
    GGML_API void    ggml_unravel_index(const struct ggml_tensor * tensor, int64_t i, int64_t * i0, int64_t * i1, int64_t * i2, int64_t * i3);
    GGML_API int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value);

    GGML_API int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_API void    ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value);

    GGML_API float   ggml_get_f32_1d(const struct ggml_tensor * tensor, int i);
    GGML_API void    ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value);

    GGML_API float   ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3);
    GGML_API void    ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value);

    GGML_API void *  ggml_get_data    (const struct ggml_tensor * tensor);
    GGML_API float * ggml_get_data_f32(const struct ggml_tensor * tensor);