naming : normalize the name of callback-related identifiers

ggml-ci
Georgi Gerganov 2024-09-16 09:11:42 +03:00
parent c4965a64f7
commit cc1c017191
14 changed files with 202 additions and 215 deletions


@ -104,7 +104,7 @@ extern "C" {
GGML_API GGML_CALL bool ggml_backend_is_cpu (ggml_backend_t backend);
GGML_API void ggml_backend_cpu_set_n_threads (ggml_backend_t backend_cpu, int n_threads);
GGML_API void ggml_backend_cpu_set_threadpool (ggml_backend_t backend_cpu, ggml_threadpool_t threadpool);
GGML_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data);
GGML_API void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback cb, void * cb_ctx);
// Create a backend buffer from an existing pointer
GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size);
@ -177,7 +177,7 @@ extern "C" {
// when ask == false, the scheduler is passing the node tensor to the user for observation
// if the user returns false, the scheduler will cancel the graph compute
//
typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * user_data);
typedef bool (*ggml_backend_sched_eval_callback)(struct ggml_tensor * t, bool ask, void * cb_ctx);
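
For reference, a minimal user-side callback using the renamed cb_ctx parameter could look like the sketch below; the my_* names and the node-counting logic are illustrative assumptions, not part of this commit.

#include <stdio.h>
#include "ggml.h"
#include "ggml-backend.h"

// observe every node: answer true when asked, then log the node once it is computed
static bool my_sched_eval_cb(struct ggml_tensor * t, bool ask, void * cb_ctx) {
    int * n_observed = (int *) cb_ctx;
    if (ask) {
        return true; // we want to see this node after it is computed
    }
    (*n_observed)++;
    fprintf(stderr, "computed node: %s\n", ggml_get_name(t));
    return true; // returning false here would cancel the graph compute
}

// registration, assuming an existing scheduler `sched`:
//   static int n_observed = 0;
//   ggml_backend_sched_set_eval_callback(sched, my_sched_eval_cb, &n_observed);
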
// Initialize a backend scheduler
GGML_API ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size, bool parallel);
@ -208,7 +208,7 @@ extern "C" {
GGML_API void ggml_backend_sched_reset(ggml_backend_sched_t sched);
// Set a callback to be called for each resulting node during graph compute
GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data);
GGML_API void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback cb, void * cb_ctx);
//
// Utils
@ -225,10 +225,10 @@ extern "C" {
GGML_API struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph);
GGML_API void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy);
typedef bool (*GGML_CALL ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * user_data);
typedef bool (*GGML_CALL ggml_backend_eval_callback)(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * cb_ctx);
// Compare the output of two backends
GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data);
GGML_API bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback cb_eval, void * cb_eval_ctx);
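
A hedged sketch of an eval callback for the comparison helper, using the renamed cb_eval/cb_eval_ctx parameters; the reporting shown is an assumption for illustration only.

#include <stdio.h>
#include "ggml.h"
#include "ggml-backend.h"

// called once per node with the outputs t1/t2 produced by the two backends;
// returning false stops the comparison early
static bool GGML_CALL my_compare_cb(int node_index, struct ggml_tensor * t1, struct ggml_tensor * t2, void * cb_eval_ctx) {
    (void) cb_eval_ctx;
    fprintf(stderr, "node %4d: %s vs %s\n", node_index, ggml_get_name(t1), ggml_get_name(t2));
    return true;
}

// usage: ggml_backend_compare_graph_backend(backend1, backend2, graph, my_compare_cb, NULL);
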
// Tensor initialization
GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);


@ -40,7 +40,7 @@ extern "C" {
// user-code should use only these functions
//
GGML_API void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data);
GGML_API void ggml_backend_metal_log_set_callback(ggml_log_callback cb, void * cb_ctx);
GGML_API ggml_backend_t ggml_backend_metal_init(void);
@ -50,7 +50,7 @@ GGML_API GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void
GGML_API void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb);
GGML_API void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data);
GGML_API void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback cb, void * cb_ctx);
GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_metal_buffer_type(void);


@ -620,7 +620,7 @@ extern "C" {
// Abort callback
// If not NULL, called before ggml computation
// If it returns true, the computation is aborted
typedef bool (*ggml_abort_callback)(void * data);
typedef bool (*ggml_abort_callback)(void * cb_ctx);
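
For illustration, an abort callback wired through the renamed cb/cb_ctx parameters might look like this minimal sketch; the deadline logic is an assumption, not part of the commit.

#include <time.h>
#include "ggml.h"
#include "ggml-backend.h"

// abort the computation once a user-supplied deadline (passed via cb_ctx) is reached
static bool my_abort_cb(void * cb_ctx) {
    const time_t * deadline = (const time_t *) cb_ctx;
    return time(NULL) >= *deadline; // true -> computation is aborted
}

// usage with the CPU backend, assuming an existing `backend_cpu`:
//   time_t deadline = time(NULL) + 10; // give up after ~10 seconds
//   ggml_backend_cpu_set_abort_callback(backend_cpu, my_abort_cb, &deadline);
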
// Scheduling priorities
enum ggml_sched_priority {
@ -655,8 +655,8 @@ extern "C" {
struct ggml_threadpool * threadpool;
// abort ggml_graph_compute when true
ggml_abort_callback abort_callback;
void * abort_callback_data;
ggml_abort_callback cb_abort;
void * cb_abort_ctx;
};
// scratch buffer
@ -2143,8 +2143,8 @@ extern "C" {
GGML_LINESEARCH_INVALID_PARAMETERS,
};
typedef void (*ggml_opt_callback)(void * data, int accum_step, float * sched, bool * cancel);
typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * user_data);
typedef void (*ggml_opt_callback)(void * cb_ctx, int accum_step, float * sched, bool * cancel);
typedef void (*ggml_log_callback)(enum ggml_log_level level, const char * text, void * cb_ctx);
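
A minimal sketch of the two callback types with the renamed cb_ctx parameter; the early-cancel condition and the constant schedule are assumptions for illustration.

#include <stdio.h>
#include "ggml.h"

// optimizer callback: cancel after a user-chosen number of accumulation steps
static void my_opt_cb(void * cb_ctx, int accum_step, float * sched, bool * cancel) {
    const int * max_steps = (const int *) cb_ctx;
    *sched  = 1.0f;                     // learning-rate schedule multiplier (ADAM only)
    *cancel = accum_step >= *max_steps; // true -> the optimizer returns GGML_OPT_RESULT_CANCEL
}

// log callback: forward ggml log messages to stderr
static void my_log_cb(enum ggml_log_level level, const char * text, void * cb_ctx) {
    (void) level;
    (void) cb_ctx;
    fputs(text, stderr);
}
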
// optimization parameters
//
@ -2281,8 +2281,8 @@ extern "C" {
struct ggml_tensor * f,
struct ggml_cgraph * gf,
struct ggml_cgraph * gb,
ggml_opt_callback callback,
void * callback_data);
ggml_opt_callback cb_opt,
void * cb_opt_ctx);
//
// tensor flags


@ -728,8 +728,8 @@ struct ggml_backend_cpu_context {
void * work_data;
size_t work_size;
ggml_abort_callback abort_callback;
void * abort_callback_data;
ggml_abort_callback cb_abort;
void * cb_abort_ctx;
};
GGML_CALL static const char * ggml_backend_cpu_name(ggml_backend_t backend) {
@ -772,8 +772,8 @@ GGML_CALL static ggml_backend_graph_plan_t ggml_backend_cpu_graph_plan_create(gg
}
}
cpu_plan->cplan.abort_callback = cpu_ctx->abort_callback;
cpu_plan->cplan.abort_callback_data = cpu_ctx->abort_callback_data;
cpu_plan->cplan.cb_abort = cpu_ctx->cb_abort;
cpu_plan->cplan.cb_abort_ctx = cpu_ctx->cb_abort_ctx;
return cpu_plan;
}
@ -811,8 +811,8 @@ GGML_CALL static enum ggml_status ggml_backend_cpu_graph_compute(ggml_backend_t
}
cplan.work_data = cpu_ctx->work_data;
cplan.abort_callback = cpu_ctx->abort_callback;
cplan.abort_callback_data = cpu_ctx->abort_callback_data;
cplan.cb_abort = cpu_ctx->cb_abort;
cplan.cb_abort_ctx = cpu_ctx->cb_abort_ctx;
return ggml_graph_compute(cgraph, &cplan);
}
@ -878,12 +878,12 @@ ggml_backend_t ggml_backend_cpu_init(void) {
return NULL;
}
ctx->n_threads = GGML_DEFAULT_N_THREADS;
ctx->threadpool = NULL;
ctx->work_data = NULL;
ctx->work_size = 0;
ctx->abort_callback = NULL;
ctx->abort_callback_data = NULL;
ctx->n_threads = GGML_DEFAULT_N_THREADS;
ctx->threadpool = NULL;
ctx->work_data = NULL;
ctx->work_size = 0;
ctx->cb_abort = NULL;
ctx->cb_abort_ctx = NULL;
ggml_backend_t cpu_backend = malloc(sizeof(struct ggml_backend));
if (cpu_backend == NULL) {
@ -922,12 +922,12 @@ void ggml_backend_cpu_set_threadpool(ggml_backend_t backend_cpu, ggml_threadpool
ctx->threadpool = threadpool;
}
void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback abort_callback, void * abort_callback_data) {
void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_callback cb, void * cb_ctx) {
GGML_ASSERT(ggml_backend_is_cpu(backend_cpu));
struct ggml_backend_cpu_context * ctx = (struct ggml_backend_cpu_context *)backend_cpu->context;
ctx->abort_callback = abort_callback;
ctx->abort_callback_data = abort_callback_data;
ctx->cb_abort = cb;
ctx->cb_abort_ctx = cb_ctx;
}
GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
@ -1093,8 +1093,8 @@ struct ggml_backend_sched {
struct ggml_context * ctx;
ggml_backend_sched_eval_callback callback_eval;
void * callback_eval_user_data;
ggml_backend_sched_eval_callback cb_eval;
void * cb_eval_ctx;
char * context_buffer;
size_t context_buffer_size;
@ -1814,7 +1814,7 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
}
}
if (!sched->callback_eval) {
if (!sched->cb_eval) {
enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph);
if (ec != GGML_STATUS_SUCCESS) {
return ec;
@ -1825,14 +1825,14 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
struct ggml_tensor * t = split->graph.nodes[j0];
// check if the user needs data from this node
bool need = sched->callback_eval(t, true, sched->callback_eval_user_data);
bool need = sched->cb_eval(t, true, sched->cb_eval_ctx);
int j1 = j0;
// determine the range [j0, j1] of nodes that can be computed together
while (!need && j1 < split->graph.n_nodes - 1) {
t = split->graph.nodes[++j1];
need = sched->callback_eval(t, true, sched->callback_eval_user_data);
need = sched->cb_eval(t, true, sched->cb_eval_ctx);
}
struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1);
@ -1845,7 +1845,7 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
// TODO: pass backend to the callback, then the user can decide if they want to synchronize
ggml_backend_synchronize(split_backend);
if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) {
if (need && !sched->cb_eval(t, false, sched->cb_eval_ctx)) {
break;
}
@ -2012,9 +2012,9 @@ void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) {
}
}
void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) {
sched->callback_eval = callback;
sched->callback_eval_user_data = user_data;
void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback cb, void * cb_ctx) {
sched->cb_eval = cb;
sched->cb_eval_ctx = cb_ctx;
}
int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
@ -2229,7 +2229,7 @@ void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) {
ggml_free(copy.ctx_unallocated);
}
bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data) {
bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback cb_eval, void * cb_eval_ctx) {
struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph);
if (copy.buffer == NULL) {
return false;
@ -2258,7 +2258,7 @@ bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t
}
// compare results, calculate rms etc
if (!callback(i, t1, t2, user_data)) {
if (!cb_eval(i, t1, t2, cb_eval_ctx)) {
break;
}
}


@ -236,8 +236,8 @@ struct ggml_backend_metal_context {
bool should_capture_next_compute;
// abort ggml_metal_graph_compute if callback returns true
ggml_abort_callback abort_callback;
void * abort_callback_data;
ggml_abort_callback cb_abort;
void * cb_abort_ctx;
};
// MSL code
@ -251,32 +251,32 @@ struct ggml_backend_metal_context {
@implementation GGMLMetalClass
@end
static void ggml_metal_default_log_callback(enum ggml_log_level level, const char * msg, void * user_data) {
static void ggml_metal_default_log_callback(enum ggml_log_level level, const char * msg, void * cb_ctx) {
fprintf(stderr, "%s", msg);
UNUSED(level);
UNUSED(user_data);
UNUSED(cb_ctx);
}
ggml_log_callback ggml_metal_log_callback = ggml_metal_default_log_callback;
void * ggml_metal_log_user_data = NULL;
static ggml_log_callback ggml_metal_log_cb = ggml_metal_default_log_callback;
static void * ggml_metal_log_cb_ctx = NULL;
GGML_ATTRIBUTE_FORMAT(2, 3)
static void ggml_metal_log(enum ggml_log_level level, const char * format, ...){
if (ggml_metal_log_callback != NULL) {
if (ggml_metal_log_cb != NULL) {
va_list args;
va_start(args, format);
char buffer[128];
int len = vsnprintf(buffer, 128, format, args);
if (len < 128) {
ggml_metal_log_callback(level, buffer, ggml_metal_log_user_data);
ggml_metal_log_cb(level, buffer, ggml_metal_log_cb_ctx);
} else {
char* buffer2 = malloc(len+1);
va_end(args);
va_start(args, format);
vsnprintf(buffer2, len+1, format, args);
buffer2[len] = 0;
ggml_metal_log_callback(level, buffer2, ggml_metal_log_user_data);
ggml_metal_log_cb(level, buffer2, ggml_metal_log_cb_ctx);
free(buffer2);
}
va_end(args);
@ -910,7 +910,7 @@ static enum ggml_status ggml_metal_graph_compute(
// always enqueue the first two command buffers
// enqueue all of the command buffers if we don't need to abort
if (cb_idx < 2 || ctx->abort_callback == NULL) {
if (cb_idx < 2 || ctx->cb_abort == NULL) {
[command_buffer enqueue];
}
}
@ -3026,7 +3026,7 @@ static enum ggml_status ggml_metal_graph_compute(
[encoder endEncoding];
if (cb_idx < 2 || ctx->abort_callback == NULL) {
if (cb_idx < 2 || ctx->cb_abort == NULL) {
[command_buffer commit];
}
});
@ -3058,7 +3058,7 @@ static enum ggml_status ggml_metal_graph_compute(
continue;
}
if (ctx->abort_callback && ctx->abort_callback(ctx->abort_callback_data)) {
if (ctx->cb_abort && ctx->cb_abort(ctx->cb_abort_ctx)) {
GGML_METAL_LOG_INFO("%s: command buffer %d aborted", __func__, i);
return GGML_STATUS_ABORTED;
}
@ -3225,19 +3225,15 @@ GGML_CALL static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buff
ctx->n_buffers = 1;
if (ctx->all_data != NULL) {
ctx->buffers[0].data = ctx->all_data;
ctx->buffers[0].size = size;
ctx->buffers[0].metal = nil;
if (size_aligned > 0) {
ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
length:size_aligned
options:MTLResourceStorageModeShared
deallocator:nil];
}
ctx->buffers[0].data = ctx->all_data;
ctx->buffers[0].size = size;
ctx->buffers[0].metal = [device newBufferWithBytesNoCopy:ctx->all_data
length:size_aligned
options:MTLResourceStorageModeShared
deallocator:nil];
}
if (size_aligned > 0 && (ctx->all_data == NULL || ctx->buffers[0].metal == nil)) {
if (ctx->all_data == NULL || ctx->buffers[0].metal == nil) {
GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
free(ctx);
ggml_backend_metal_free_device();
@ -3314,17 +3310,14 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data,
// the buffer fits into the max buffer size allowed by the device
if (size_aligned <= device.maxBufferLength) {
ctx->buffers[ctx->n_buffers].data = data;
ctx->buffers[ctx->n_buffers].size = size;
ctx->buffers[ctx->n_buffers].metal = nil;
ctx->buffers[ctx->n_buffers].data = data;
ctx->buffers[ctx->n_buffers].size = size;
if (size_aligned > 0) {
ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];
ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:data length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];
if (ctx->buffers[ctx->n_buffers].metal == nil) {
GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
return false;
}
if (ctx->buffers[ctx->n_buffers].metal == nil) {
GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
return false;
}
ggml_backend_metal_log_allocated_size(device, size_aligned);
@ -3340,17 +3333,14 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_metal_buffer_from_ptr(void * data,
for (size_t i = 0; i < size; i += size_step) {
const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);
ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
ctx->buffers[ctx->n_buffers].size = size_step_aligned;
ctx->buffers[ctx->n_buffers].metal = nil;
ctx->buffers[ctx->n_buffers].data = (void *) ((uint8_t *) data + i);
ctx->buffers[ctx->n_buffers].size = size_step_aligned;
if (size_step_aligned > 0) {
ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];
ctx->buffers[ctx->n_buffers].metal = [device newBufferWithBytesNoCopy:(void *) ((uint8_t *) data + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];
if (ctx->buffers[ctx->n_buffers].metal == nil) {
GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0);
return false;
}
if (ctx->buffers[ctx->n_buffers].metal == nil) {
GGML_METAL_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0);
return false;
}
ggml_backend_metal_log_allocated_size(device, size_step_aligned);
@ -3427,9 +3417,9 @@ static struct ggml_backend_i ggml_backend_metal_i = {
/* .event_synchronize = */ NULL,
};
void ggml_backend_metal_log_set_callback(ggml_log_callback log_callback, void * user_data) {
ggml_metal_log_callback = log_callback;
ggml_metal_log_user_data = user_data;
void ggml_backend_metal_log_set_callback(ggml_log_callback cb, void * cb_ctx) {
ggml_metal_log_cb = cb;
ggml_metal_log_cb_ctx = cb_ctx;
}
static ggml_guid_t ggml_backend_metal_guid(void) {
@ -3467,13 +3457,13 @@ void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_BUFFERS);
}
void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data) {
void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback cb, void * cb_ctx) {
GGML_ASSERT(ggml_backend_is_metal(backend));
struct ggml_backend_metal_context * ctx = (struct ggml_backend_metal_context *)backend->context;
ctx->abort_callback = abort_callback;
ctx->abort_callback_data = user_data;
ctx->cb_abort = cb;
ctx->cb_abort_ctx = cb_ctx;
}
bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) {
@ -3491,11 +3481,11 @@ void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
ctx->should_capture_next_compute = true;
}
GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data); // silence warning
GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * cb_ctx); // silence warning
GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * user_data) {
GGML_CALL ggml_backend_t ggml_backend_reg_metal_init(const char * params, void * cb_ctx) {
return ggml_backend_metal_init();
GGML_UNUSED(params);
GGML_UNUSED(user_data);
GGML_UNUSED(cb_ctx);
}


@ -184,7 +184,7 @@ struct backtrace_state {
void ** end;
};
static _Unwind_Reason_Code unwind_callback(struct _Unwind_Context* context, void* arg) {
static _Unwind_Reason_Code unwind_callback(struct _Unwind_Context * context, void * arg) {
struct backtrace_state * state = (struct backtrace_state *)arg;
uintptr_t pc = _Unwind_GetIP(context);
if (pc) {
@ -19951,7 +19951,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
ggml_compute_forward(&params, node);
if (state->ith == 0 && cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
if (state->ith == 0 && cplan->cb_abort && cplan->cb_abort(cplan->cb_abort_ctx)) {
state->threadpool->ec = GGML_STATUS_ABORTED;
}
@ -21011,8 +21011,8 @@ static enum ggml_opt_result ggml_opt_adam(
struct ggml_tensor * f,
struct ggml_cgraph * gf,
struct ggml_cgraph * gb,
ggml_opt_callback callback,
void * callback_data) {
ggml_opt_callback cb_opt,
void * cb_opt_ctx) {
GGML_ASSERT(ggml_is_scalar(f));
GGML_ASSERT(f->type == GGML_TYPE_F32);
@ -21066,8 +21066,8 @@ static enum ggml_opt_result ggml_opt_adam(
float fx = 0;
ggml_set_zero(opt->adam.g);
for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
if (callback) {
callback(callback_data, accum_step, &sched, &cancel);
if (cb_opt) {
cb_opt(cb_opt_ctx, accum_step, &sched, &cancel);
if (cancel) {
return GGML_OPT_RESULT_CANCEL;
}
@ -21157,8 +21157,8 @@ static enum ggml_opt_result ggml_opt_adam(
fx = 0;
ggml_set_zero(opt->adam.g);
for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
if (callback) {
callback(callback_data, accum_step, &sched, &cancel);
if (cb_opt) {
cb_opt(cb_opt_ctx, accum_step, &sched, &cancel);
if (cancel) {
return GGML_OPT_RESULT_CANCEL;
}
@ -21254,8 +21254,8 @@ static enum ggml_opt_result linesearch_backtracking(
const int np,
struct ggml_tensor * ps[],
bool * cancel,
ggml_opt_callback callback,
void * callback_data) {
ggml_opt_callback cb_opt,
void * cb_opt_ctx) {
int count = 0;
float width = 0.0f;
@ -21297,10 +21297,10 @@ static enum ggml_opt_result linesearch_backtracking(
*fx = 0;
memset(g, 0, sizeof(float)*nx);
for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
if (callback) {
if (cb_opt) {
// L-BFGS does not support learning rate -> ignore learning schedule
float sched = 0;
callback(callback_data, accum_step, &sched, cancel);
cb_opt(cb_opt_ctx, accum_step, &sched, cancel);
if (*cancel) {
return GGML_OPT_RESULT_CANCEL;
}
@ -21370,8 +21370,8 @@ static enum ggml_opt_result ggml_opt_lbfgs(
struct ggml_tensor * f,
struct ggml_cgraph * gf,
struct ggml_cgraph * gb,
ggml_opt_callback callback,
void * callback_data) {
ggml_opt_callback cb_opt,
void * cb_opt_ctx) {
if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
@ -21440,10 +21440,10 @@ static enum ggml_opt_result ggml_opt_lbfgs(
fx = 0;
memset(g, 0, sizeof(float)*nx);
for (int accum_step = 0; accum_step < n_accum; ++accum_step) {
if (callback) {
if (cb_opt) {
// L-BFGS does not support learning rate -> ignore learning schedule
float sched = 0;
callback(callback_data, accum_step, &sched, &cancel);
cb_opt(cb_opt_ctx, accum_step, &sched, &cancel);
if (cancel) {
return GGML_OPT_RESULT_CANCEL;
}
@ -21516,7 +21516,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
// to determine if the optimization should be cancelled
// this is a simple change, but not doing this atm, since I don't have a nice
// way to test and don't want to break something with so many changes lined up
ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, cb_opt, cb_opt_ctx);
if (cancel) {
return GGML_OPT_RESULT_CANCEL;
}
@ -21834,8 +21834,8 @@ enum ggml_opt_result ggml_opt_resume_g(
struct ggml_tensor * f,
struct ggml_cgraph * gf,
struct ggml_cgraph * gb,
ggml_opt_callback callback,
void * callback_data) {
ggml_opt_callback cb_opt,
void * cb_opt_ctx) {
GGML_ASSERT(f->grad && "ggml_set_param must be called for at least one ancestor");
@ -21845,11 +21845,11 @@ enum ggml_opt_result ggml_opt_resume_g(
switch (opt->params.type) {
case GGML_OPT_TYPE_ADAM:
{
result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, cb_opt, cb_opt_ctx);
} break;
case GGML_OPT_TYPE_LBFGS:
{
result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, cb_opt, cb_opt_ctx);
} break;
}