From 9eb1ef86534ff61c9d517e3da72243b67a06714d Mon Sep 17 00:00:00 2001
From: xaedes
Date: Tue, 15 Aug 2023 14:03:02 +0200
Subject: [PATCH] move and remove code

---
 examples/finetune/finetune.cpp | 30 +++++++++++-------------------
 1 file changed, 11 insertions(+), 19 deletions(-)

diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 11754ffd9..c02f3f859 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -167,6 +167,17 @@ struct ggml_tensor * randomize_tensor_uniform(struct ggml_tensor * tensor, struc
     return tensor;
 }
 
+struct my_llama_kv_cache {
+    struct ggml_context * ctx = NULL;
+
+    struct ggml_tensor * k;
+    struct ggml_tensor * v;
+
+    // llama_ctx_buffer buf;
+
+    int n; // number of tokens currently in the cache
+};
+
 struct llama_vocab {
     using id = int32_t;
     using token = std::string;
@@ -213,17 +224,6 @@ struct my_llama_layer {
     struct ggml_tensor * w3;
 };
 
-struct my_llama_kv_cache {
-    struct ggml_context * ctx = NULL;
-
-    struct ggml_tensor * k;
-    struct ggml_tensor * v;
-
-    // llama_ctx_buffer buf;
-
-    int n; // number of tokens currently in the cache
-};
-
 struct my_llama_model {
     struct ggml_context * ctx = NULL;
 
@@ -1165,14 +1165,6 @@ void lshift_examples(struct ggml_tensor * tokens_input, struct ggml_tensor * tar
     }
 }
 
-struct ggml_tensor * square_error_loss(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * target) {
-    return ggml_sum(ctx, ggml_sqr(ctx, ggml_sub(ctx, target, a)));
-}
-
-struct ggml_tensor * cross_entropy_loss(struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * probs) {
-    return ggml_cross_entropy_loss(ctx, a, probs);
-}
-
 #ifdef __GNUC__
 #ifdef __MINGW32__
 __attribute__((format(gnu_printf, 1, 2)))
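
Note for reviewers: my_llama_kv_cache is only moved by this patch, not
changed; it is hoisted above llama_vocab so later code can reference it.
As rough orientation for how the k/v tensors get sized, here is a minimal
sketch modeled on the sibling train-text-from-scratch example -- the
init_kv_cache_sketch helper and the hparams names (n_ctx, n_embd, n_layer)
are illustrative assumptions, not part of this patch:

    // Illustrative sketch (not in this patch): size the cache so every
    // layer can hold keys and values for n_ctx tokens across n_batch
    // sequences, one f32 element per embedding dimension.
    // Assumes cache->ctx was already created with ggml_init.
    static void init_kv_cache_sketch(struct my_llama_kv_cache * cache,
                                     struct my_llama_model * model,
                                     int n_batch) {
        const auto & hparams = model->hparams;
        const int64_t n_mem      = (int64_t) hparams.n_layer * hparams.n_ctx * n_batch;
        const int64_t n_elements = (int64_t) hparams.n_embd * n_mem;

        cache->n = 0; // the cache starts out empty
        cache->k = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
        cache->v = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
    }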
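
Note on the removed loss helpers: both were one-line wrappers, so call
sites can invoke the ggml ops directly. A sketch of the equivalent direct
calls, with the wrapper bodies from this patch inlined (the tensor names
logits and targets are illustrative):

    // was: cross_entropy_loss(ctx, logits, targets);
    struct ggml_tensor * ce = ggml_cross_entropy_loss(ctx, logits, targets);

    // was: square_error_loss(ctx, logits, targets);
    // i.e. sum((targets - logits)^2)
    struct ggml_tensor * se = ggml_sum(ctx, ggml_sqr(ctx, ggml_sub(ctx, targets, logits)));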