still merging in process
parent a8958f6b76, commit a0cfed1e30
19 changed files with 6903 additions and 5644 deletions
@@ -1,15 +0,0 @@
-#pragma once
-
-#include "ggml-opencl.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-void ggml_cl_init_legacy(void);
-
-void ggml_cl_sgemm_wrapper_legacy(const enum ggml_blas_order order, const enum ggml_blas_op trans_a, const enum ggml_blas_op trans_b, const int m, const int n, const int k, const float alpha, const void *host_a, const int lda, const float *host_b, const int ldb, const float beta, float *host_c, const int ldc, const int btype);
-
-#ifdef __cplusplus
-}
-#endif
@@ -1,35 +0,0 @@
-#pragma once
-
-#include "ggml.h"
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-enum ggml_blas_order {
-    GGML_BLAS_ORDER_ROW_MAJOR = 101,
-    GGML_BLAS_ORDER_COLUMN_MAJOR = 102,
-};
-
-enum ggml_blas_op {
-    GGML_BLAS_OP_N = 111,
-    GGML_BLAS_OP_T = 112,
-    GGML_BLAS_OP_C = 113,
-};
-
-void ggml_cl_init(void);
-
-bool ggml_cl_can_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-size_t ggml_cl_mul_mat_get_wsize(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst);
-void ggml_cl_mul_mat(const struct ggml_tensor * src0, const struct ggml_tensor * src1, struct ggml_tensor * dst, void * wdata, size_t wsize);
-
-void * ggml_cl_host_malloc(size_t size);
-void ggml_cl_host_free(void * ptr);
-
-void ggml_cl_transform_tensor(struct ggml_tensor * tensor);
-
-void ggml_cl_sgemm_wrapper(const enum ggml_blas_order order, const enum ggml_blas_op trans_a, const enum ggml_blas_op trans_b, const int m, const int n, const int k, const float alpha, const void *host_a, const int lda, const float *host_b, const int ldb, const float beta, float *host_c, const int ldc, const int btype);
-
-#ifdef __cplusplus
-}
-#endif
ggml.c (4 changed lines)
@@ -138,14 +138,14 @@ inline static void* ggml_aligned_malloc(size_t size) {
 #if defined(GGML_USE_ACCELERATE)
 #include <Accelerate/Accelerate.h>
 #if defined(GGML_USE_CLBLAST) // allow usage of CLBlast alongside Accelerate functions
-#include "ggml-opencl.h"
+#include "ggml_v2-opencl.h"
 #endif
 #elif defined(GGML_USE_OPENBLAS)
 #include <cblas.h>
 #elif defined(GGML_USE_CUBLAS)
 #include "ggml-cuda.h"
 #elif defined(GGML_USE_CLBLAST)
-#include "ggml-opencl.h"
+#include "ggml_v2-opencl.h"
 #endif

 #undef MIN
@@ -12,7 +7,7 @@
 #include "otherarch.h"

 //for easier compilation
-#include "llamaextra.cpp"
+#include "llama_v2.cpp"

 //concat source files into one file for compilation purposes
 #include "utils.cpp"
@@ -1,86 +0,0 @@
-#include "ggml.h"
-#include "llamaextra.h"
-#include "llama.cpp"
-
-
-// TODO: Calculate this constant from the vocabulary
-#define MAX_TOKEN_LEN 18
-// SentencePiece implementation after https://guillaume-be.github.io/2020-05-30/sentence_piece
-std::vector<llama_token> legacy_llama_tokenize(const llama_vocab & vocab, const std::string & text, bool bos) {
-    std::vector<llama_token> res;
-    std::vector<int> score;
-    std::vector<llama_token> prev;
-    int len = text.length();
-
-    score.resize(len + 1);
-    prev.resize(len + 1);
-
-    // Forward pass
-    for (int i = 0; i < len; i++) {
-        int max_len = std::min(len - i, MAX_TOKEN_LEN);
-        for (int sub_len = 1; sub_len <= max_len; sub_len++) {
-            auto sub = text.substr(i, sub_len);
-            auto token = vocab.token_to_id.find(sub);
-            if (token != vocab.token_to_id.end()) {
-                int token_score = sub.length() * sub.length();
-                int local_score = score[i] + token_score;
-                int next = i + sub_len;
-                if (score[next] < local_score) {
-                    score[next] = local_score;
-                    prev[next] = (*token).second;
-                }
-            }
-        }
-    }
-
-    // Backward pass
-    int i = len;
-    while (i > 0) {
-        llama_token token_id = prev[i];
-        if (token_id == 0) {
-            // TODO: Return error or something more meaningful
-            printf("failed to tokenize string!\n");
-            break;
-        }
-        res.push_back(token_id);
-        auto token = vocab.id_to_token[token_id].tok;
-        i -= token.length();
-    }
-
-    if (bos) {
-        res.push_back(1); // TODO: replace with vocab.bos
-    }
-
-    // Pieces are in reverse order so correct that
-    std::reverse(res.begin(), res.end());
-
-    return res;
-}
-
-int legacy_llama_tokenize(
-        struct llama_context * ctx,
-        const char * text,
-        llama_token * tokens,
-        int n_max_tokens,
-        bool add_bos) {
-    auto res = legacy_llama_tokenize(ctx->vocab, text, add_bos);
-
-    if (n_max_tokens < (int) res.size()) {
-        fprintf(stderr, "%s: too many tokens\n", __func__);
-        return -((int) res.size());
-    }
-
-    for (size_t i = 0; i < res.size(); i++) {
-        tokens[i] = res[i];
-    }
-
-    return res.size();
-}
-
-std::vector<llama_token> legacy_llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos) {
-    std::vector<llama_token> res(8096);
-    int n = legacy_llama_tokenize(ctx, text.c_str(), res.data(), res.size(), add_bos);
-    res.resize(n);
-
-    return res;
-}
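For reference, the tokenizer being deleted above is a small dynamic-programming segmenter: the forward pass records, for every end position, the best score reachable there (a matched substring scores its length squared, which favours longer vocabulary hits), and the backward pass walks those records from the end of the text to recover the chosen token ids in reverse. A minimal self-contained sketch of the same idea follows; the toy vocabulary, the ids, the input string and the 2-character token limit are invented for illustration and are not part of this commit.

#include <algorithm>
#include <cstdio>
#include <map>
#include <string>
#include <vector>

int main() {
    // Toy vocabulary (token -> id); the real code looks tokens up in llama_vocab.
    const std::map<std::string, int> vocab = { {"a", 1}, {"b", 2}, {"ab", 3}, {"ba", 4} };
    const std::string text = "abab";
    const int max_token_len = 2; // stands in for MAX_TOKEN_LEN

    std::vector<int> score(text.size() + 1, 0);    // best score ending at each position
    std::vector<int> prev_id(text.size() + 1, 0);  // token id that achieves it
    std::vector<int> prev_len(text.size() + 1, 0); // length of that token

    // Forward pass: try every vocabulary match starting at i and relax the end position.
    for (size_t i = 0; i < text.size(); i++) {
        const int max_len = std::min((int) (text.size() - i), max_token_len);
        for (int sub_len = 1; sub_len <= max_len; sub_len++) {
            const auto it = vocab.find(text.substr(i, sub_len));
            if (it == vocab.end()) continue;
            const int local_score = score[i] + sub_len * sub_len; // squared-length scoring
            if (score[i + sub_len] < local_score) {
                score[i + sub_len]    = local_score;
                prev_id[i + sub_len]  = it->second;
                prev_len[i + sub_len] = sub_len;
            }
        }
    }

    // Backward pass: walk the winning tokens back from the end, then reverse.
    std::vector<int> ids;
    for (size_t i = text.size(); i > 0; i -= prev_len[i]) {
        if (prev_id[i] == 0) { printf("failed to tokenize string!\n"); return 1; }
        ids.push_back(prev_id[i]);
    }
    std::reverse(ids.begin(), ids.end());

    for (int id : ids) printf("%d ", id); // prints "3 3" ("ab" + "ab")
    printf("\n");
    return 0;
}

On "abab" this emits id 3 twice (the two-character token "ab" preferred over four single characters), which is exactly the preference the squared-length score encodes.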
llamaextra.h (deleted, 18 lines)
@@ -1,18 +0,0 @@
-#pragma once
-#include "common.h"
-
-#include <cassert>
-#include <cinttypes>
-#include <cmath>
-#include <cstdio>
-#include <cstring>
-#include <fstream>
-#include <iostream>
-#include <map>
-#include <string>
-#include <vector>
-
-#include "llama.h"
-#include "ggml.h"
-
-std::vector<llama_token> legacy_llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos);
@@ -1,4 +1,4 @@
-#include "ggml-opencl-legacy.h"
+#include "ggml_v2-opencl-legacy.h"

 #define CL_TARGET_OPENCL_VERSION 110
 #include <clblast_c.h>
@@ -7,7 +7,7 @@
 #include <stdio.h>
 #include <string.h>

-#include "ggml.h"
+#include "ggml_v2.h"

 #define MULTILINE_QUOTE(...) #__VA_ARGS__
 const char * clblast_dequant_legacy = MULTILINE_QUOTE(
@@ -171,7 +171,7 @@ __kernel void dequantize_row_q8_0(__global struct block_q8_0* blocks, __global f

 #define QK5_0 32
 typedef struct {
-    ggml_fp16_t d;          // delta
+    ggml_v2_fp16_t d;       // delta
     uint8_t qh[4];          // 5-th bit of quants
     uint8_t qs[QK5_0 / 2];  // nibbles / quants
 } block_q5_0;
@@ -221,12 +221,12 @@ static cl_program build_program_from_source(cl_context ctx, cl_device_id dev, co
     return p;
 }

-void ggml_cl_init_legacy(void) {
+void ggml_v2_cl_init_legacy(void) {
     cl_int err = 0;
-    char * GGML_CLBLAST_PLATFORM = getenv("GGML_CLBLAST_PLATFORM");
-    char * GGML_CLBLAST_DEVICE = getenv("GGML_CLBLAST_DEVICE");
-    int plat_num = (GGML_CLBLAST_PLATFORM == NULL ? 0 : atoi(GGML_CLBLAST_PLATFORM));
-    int dev_num = (GGML_CLBLAST_DEVICE == NULL ? 0 : atoi(GGML_CLBLAST_DEVICE));
+    char * GGML_V2_CLBLAST_PLATFORM = getenv("GGML_CLBLAST_PLATFORM");
+    char * GGML_V2_CLBLAST_DEVICE = getenv("GGML_CLBLAST_DEVICE");
+    int plat_num = (GGML_V2_CLBLAST_PLATFORM == NULL ? 0 : atoi(GGML_V2_CLBLAST_PLATFORM));
+    int dev_num = (GGML_V2_CLBLAST_DEVICE == NULL ? 0 : atoi(GGML_V2_CLBLAST_DEVICE));
     printf("\nInitializing LEGACY CLBlast (First Run)...");
     printf("\nAttempting to use: Platform=%d, Device=%d (If invalid, program will crash)\n",plat_num,dev_num);
     cl_uint num_platforms;
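Note that the legacy initializer above still reads the same GGML_CLBLAST_PLATFORM and GGML_CLBLAST_DEVICE environment variables as before (only the local variable names gain the V2 prefix), and both default to 0 when unset; for example, launching with GGML_CLBLAST_PLATFORM=0 and GGML_CLBLAST_DEVICE=1 selects the second device on the first platform, and, as the printf warns, an invalid index is not validated and will crash.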
@@ -271,7 +271,7 @@ void ggml_cl_init_legacy(void) {
     CL_CHECK(err, "clCreateKernel");
 }

-static void ggml_cl_malloc(size_t req_size, size_t* cur_size, cl_mem_flags flags, cl_mem* buf) {
+static void ggml_v2_cl_malloc(size_t req_size, size_t* cur_size, cl_mem_flags flags, cl_mem* buf) {
     if (req_size <= *cur_size) {
         return;
     }
@@ -286,8 +286,8 @@ static void ggml_cl_malloc(size_t req_size, size_t* cur_size, cl_mem_flags flags
     CL_CHECK(err, "clCreateBuffer");
 }

-void ggml_cl_sgemm_wrapper_legacy(
-        const enum ggml_blas_order order, const enum ggml_blas_op trans_a, const enum ggml_blas_op trans_b,
+void ggml_v2_cl_sgemm_wrapper_legacy(
+        const enum ggml_v2_blas_order order, const enum ggml_v2_blas_op trans_a, const enum ggml_v2_blas_op trans_b,
         const int m, const int n, const int k,
         const float alpha, const void *host_a, const int lda,
         const float *host_b, const int ldb, const float beta,
@@ -300,34 +300,34 @@ void ggml_cl_sgemm_wrapper_legacy(
     cl_block_q5_0* cl_host_b;

     switch (btype) {
-    case GGML_TYPE_F32:
+    case GGML_V2_TYPE_F32:
         dequant = false;
         break;
-    case GGML_TYPE_Q4_0:
+    case GGML_V2_TYPE_Q4_0:
         dequant = true;
         kernel = kernel_q4_0;
         local = 16;
         size_qb = global * (sizeof(float) + local) / 32;
         break;
-    case GGML_TYPE_Q4_1:
+    case GGML_V2_TYPE_Q4_1:
         dequant = true;
         kernel = kernel_q4_1;
         local = 16;
         size_qb = global * (sizeof(float) * 2 + local) / 32;
         break;
-    case GGML_TYPE_Q4_2:
+    case GGML_V2_TYPE_Q4_2:
         dequant = true;
         kernel = kernel_q4_2;
         local = 8;
-        size_qb = global * (sizeof(ggml_fp16_t) + local) / 16;
+        size_qb = global * (sizeof(ggml_v2_fp16_t) + local) / 16;
         break;
-    case GGML_TYPE_Q4_3:
+    case GGML_V2_TYPE_Q4_3:
         dequant = true;
         kernel = kernel_q4_3;
         local = 8;
         size_qb = global * (sizeof(short) * 2 + local) / 16;
         break;
-    case GGML_TYPE_Q5_0:
+    case GGML_V2_TYPE_Q5_0:
         dequant = true;
         kernel = kernel_q5_0;
         local = 16;
@@ -337,20 +337,20 @@ void ggml_cl_sgemm_wrapper_legacy(
         const block_q5_0* b = (const block_q5_0*) host_b;
         cl_host_b = (cl_block_q5_0*) malloc(sizeof(cl_block_q5_0) * global / 32);
         for (size_t i = 0; i < global / 32; i++) {
-            cl_host_b[i].d = ggml_fp16_to_fp32(b[i].d);
+            cl_host_b[i].d = ggml_v2_fp16_to_fp32(b[i].d);
             memcpy(&cl_host_b[i].qh, b[i].qh, sizeof(uint32_t));
             memcpy(&cl_host_b[i].qs, b[i].qs, QK5_0 / 2);
         }
         host_b = (const float*) cl_host_b;
         size_qb = global * (sizeof(float) + sizeof(uint32_t) + local) / 32;
         break;
-    case GGML_TYPE_Q5_1:
+    case GGML_V2_TYPE_Q5_1:
         dequant = true;
         kernel = kernel_q5_1;
         local = 16;
-        size_qb = global * (sizeof(ggml_fp16_t) * 2 + sizeof(uint32_t) + local) / 32;
+        size_qb = global * (sizeof(ggml_v2_fp16_t) * 2 + sizeof(uint32_t) + local) / 32;
         break;
-    case GGML_TYPE_Q8_0:
+    case GGML_V2_TYPE_Q8_0:
         dequant = true;
         kernel = kernel_q8_0;
         local = 32;
@@ -366,12 +366,12 @@ void ggml_cl_sgemm_wrapper_legacy(
     const size_t size_c = m * n * sizeof(float);

     // Prepare buffers
-    ggml_cl_malloc(size_a, &cl_size_a, CL_MEM_READ_ONLY, &cl_buffer_a);
+    ggml_v2_cl_malloc(size_a, &cl_size_a, CL_MEM_READ_ONLY, &cl_buffer_a);
     if (dequant) {
-        ggml_cl_malloc(size_qb, &cl_size_qb, CL_MEM_READ_ONLY, &cl_buffer_qb);
+        ggml_v2_cl_malloc(size_qb, &cl_size_qb, CL_MEM_READ_ONLY, &cl_buffer_qb);
     }
-    ggml_cl_malloc(size_b, &cl_size_b, CL_MEM_READ_WRITE, &cl_buffer_b);
-    ggml_cl_malloc(size_c, &cl_size_c, CL_MEM_WRITE_ONLY, &cl_buffer_c);
+    ggml_v2_cl_malloc(size_b, &cl_size_b, CL_MEM_READ_WRITE, &cl_buffer_b);
+    ggml_v2_cl_malloc(size_c, &cl_size_c, CL_MEM_WRITE_ONLY, &cl_buffer_c);

     cl_event ev_a, ev_qb, ev_b;
@@ -421,7 +421,7 @@ void ggml_cl_sgemm_wrapper_legacy(
     clWaitForEvents(1, &ev_c);
     clReleaseEvent(ev_sgemm);
     clReleaseEvent(ev_c);
-    if (btype == GGML_TYPE_Q5_0) {
+    if (btype == GGML_V2_TYPE_Q5_0) {
         free((void*) cl_host_b);
     }
 }
otherarch/ggml_v2-opencl-legacy.h (new file, 15 lines)
@@ -0,0 +1,15 @@
+#pragma once
+
+#include "ggml_v2-opencl.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void ggml_v2_cl_init_legacy(void);
+
+void ggml_v2_cl_sgemm_wrapper_legacy(const enum ggml_v2_blas_order order, const enum ggml_v2_blas_op trans_a, const enum ggml_v2_blas_op trans_b, const int m, const int n, const int k, const float alpha, const void *host_a, const int lda, const float *host_b, const int ldb, const float beta, float *host_c, const int ldc, const int btype);
+
+#ifdef __cplusplus
+}
+#endif
@@ -1,4 +1,4 @@
-#include "ggml-opencl.h"
+#include "ggml_v2-opencl.h"

 #include <array>
 #include <atomic>
otherarch/ggml_v2-opencl.h (new file, 35 lines)
@@ -0,0 +1,35 @@
+#pragma once
+
+#include "ggml_v2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+enum ggml_v2_blas_order {
+    GGML_V2_BLAS_ORDER_ROW_MAJOR = 101,
+    GGML_V2_BLAS_ORDER_COLUMN_MAJOR = 102,
+};
+
+enum ggml_v2_blas_op {
+    GGML_V2_BLAS_OP_N = 111,
+    GGML_V2_BLAS_OP_T = 112,
+    GGML_V2_BLAS_OP_C = 113,
+};
+
+void ggml_v2_cl_init(void);
+
+bool ggml_v2_cl_can_mul_mat(const struct ggml_v2_tensor * src0, const struct ggml_v2_tensor * src1, struct ggml_v2_tensor * dst);
+size_t ggml_v2_cl_mul_mat_get_wsize(const struct ggml_v2_tensor * src0, const struct ggml_v2_tensor * src1, struct ggml_v2_tensor * dst);
+void ggml_v2_cl_mul_mat(const struct ggml_v2_tensor * src0, const struct ggml_v2_tensor * src1, struct ggml_v2_tensor * dst, void * wdata, size_t wsize);
+
+void * ggml_v2_cl_host_malloc(size_t size);
+void ggml_v2_cl_host_free(void * ptr);
+
+void ggml_v2_cl_transform_tensor(struct ggml_v2_tensor * tensor);
+
+void ggml_v2_cl_sgemm_wrapper(const enum ggml_v2_blas_order order, const enum ggml_v2_blas_op trans_a, const enum ggml_v2_blas_op trans_b, const int m, const int n, const int k, const float alpha, const void *host_a, const int lda, const float *host_b, const int ldb, const float beta, float *host_c, const int ldc, const int btype);
+
+#ifdef __cplusplus
+}
+#endif
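The new header keeps the old ggml-opencl.h interface, only renamed with the ggml_v2_ prefix: a capability check, a workspace-size query (ggml_v2_cl_mul_mat_get_wsize) to size wdata, the actual multiply, plus host allocation and tensor upload helpers. Below is a hedged sketch of how a matrix-multiply call site would typically consult it; the helper name and the fallback behaviour are assumptions for illustration, not code from this commit.

#include "ggml_v2.h"

#if defined(GGML_USE_CLBLAST)
#include "ggml_v2-opencl.h"
#endif

// Illustrative only: try the CLBlast path when it is compiled in and the tensors
// qualify, otherwise tell the caller to fall back to the normal CPU path.
static bool maybe_cl_mul_mat(const struct ggml_v2_tensor * src0,
                             const struct ggml_v2_tensor * src1,
                             struct ggml_v2_tensor * dst,
                             void * wdata, size_t wsize) {
#if defined(GGML_USE_CLBLAST)
    if (ggml_v2_cl_can_mul_mat(src0, src1, dst)) {
        ggml_v2_cl_mul_mat(src0, src1, dst, wdata, wsize);
        return true;
    }
#endif
    (void) src0; (void) src1; (void) dst; (void) wdata; (void) wsize;
    return false;
}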
(File diff suppressed because it is too large.)

otherarch/ggml_v2.h (new file, 1143 lines)
(File diff suppressed because it is too large.)
@@ -1,4 +1,4 @@
-#include "ggml.h"
+#include "ggml_v2.h"
 #include "otherarch.h"

 #include "utils.h"
@@ -16,7 +16,7 @@
 #include "model_adapter.h"

 #if defined(GGML_USE_CLBLAST)
-#include "ggml-opencl.h"
+#include "ggml_v2-opencl.h"
 #endif

 // load the model's weights from a file
@@ -50,7 +50,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
         fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));

-        const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
+        const int32_t qntvr = hparams.ftype / GGML_V2_QNT_VERSION_FACTOR;

         printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
         printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
@@ -60,7 +60,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         printf("%s: ftype = %d\n", __func__, hparams.ftype);
         printf("%s: qntvr = %d\n", __func__, qntvr);

-        hparams.ftype %= GGML_QNT_VERSION_FACTOR;
+        hparams.ftype %= GGML_V2_QNT_VERSION_FACTOR;
     }

     // load vocab
@@ -87,12 +87,12 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
         }
     }

-    auto memory_type = GGML_TYPE_F16;
+    auto memory_type = GGML_V2_TYPE_F16;

    // for the big tensors, we have the option to store the data in 16-bit floats or quantized
    // in order to save memory and also to speed up the computation
-    ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype));
-    if (wtype == GGML_TYPE_COUNT) {
+    ggml_v2_type wtype = ggml_v2_ftype_to_ggml_v2_type((ggml_v2_ftype) (model.hparams.ftype));
+    if (wtype == GGML_V2_TYPE_COUNT) {
         fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n",
                 __func__, fname.c_str(), model.hparams.ftype);
         return ModelLoadResult::FAIL;
@ -110,51 +110,51 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
|
|||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b
|
||||
ctx_size += n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // ln_f_g
|
||||
ctx_size += n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // ln_f_b
|
||||
|
||||
ctx_size += n_vocab*n_embd*ggml_type_sizef(wtype); // wte
|
||||
ctx_size += n_ctx*n_embd*ggml_type_sizef(GGML_TYPE_F32); // wpe
|
||||
ctx_size += n_vocab*n_embd*ggml_type_sizef(wtype); // lm_head
|
||||
ctx_size += n_vocab*n_embd*ggml_v2_type_sizef(wtype); // wte
|
||||
ctx_size += n_ctx*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // wpe
|
||||
ctx_size += n_vocab*n_embd*ggml_v2_type_sizef(wtype); // lm_head
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_1_g
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_1_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_b
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_2_g
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_2_b
|
||||
|
||||
ctx_size += n_layer*(3*n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_attn_w
|
||||
ctx_size += n_layer*( 3*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_attn_b
|
||||
ctx_size += n_layer*(3*n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_attn_attn_w
|
||||
ctx_size += n_layer*( 3*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_attn_attn_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_proj_b
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_attn_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_attn_proj_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w
|
||||
ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_mlp_fc_w
|
||||
ctx_size += n_layer*( 4*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_mlp_fc_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_mlp_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_mlp_proj_b
|
||||
|
||||
ctx_size += 1.5*(n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // memory_k
|
||||
ctx_size += 1.5*(n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // memory_v
|
||||
ctx_size += 1.5*(n_ctx*n_layer*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // memory_k
|
||||
ctx_size += 1.5*(n_ctx*n_layer*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // memory_v
|
||||
|
||||
ctx_size += (6 + 12*n_layer)*512; // object overhead
|
||||
|
||||
printf("%s: ggml tensor size = %d bytes\n", __func__, (int) sizeof(ggml_tensor));
|
||||
printf("%s: ggml tensor size = %d bytes\n", __func__, (int) sizeof(ggml_v2_tensor));
|
||||
printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
|
||||
}
|
||||
|
||||
// create the ggml context
|
||||
{
|
||||
struct ggml_init_params params;
|
||||
struct ggml_v2_init_params params;
|
||||
params.mem_size = ctx_size;
|
||||
params.mem_buffer = NULL;
|
||||
params.no_alloc = false;
|
||||
|
||||
|
||||
model.ctx = ggml_init(params);
|
||||
model.ctx = ggml_v2_init(params);
|
||||
if (!model.ctx) {
|
||||
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
|
||||
fprintf(stderr, "%s: ggml_v2_init() failed\n", __func__);
|
||||
return ModelLoadResult::FAIL;
|
||||
}
|
||||
}
|
||||
|
@ -170,12 +170,12 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
|
|||
|
||||
model.layers.resize(n_layer);
|
||||
|
||||
model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model.ln_f_g = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
model.ln_f_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
model.wpe = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_ctx);
|
||||
model.lm_head = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
model.wte = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
model.wpe = ggml_v2_new_tensor_2d(ctx, GGML_V2_TYPE_F32, n_embd, n_ctx);
|
||||
model.lm_head = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
|
||||
// map by name
|
||||
model.tensors["model/ln_f/g"] = model.ln_f_g;
|
||||
|
@ -188,23 +188,23 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
|
|||
for (int i = 0; i < n_layer; ++i) {
|
||||
auto & layer = model.layers[i];
|
||||
|
||||
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_1_g = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
layer.ln_1_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_2_g = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
layer.ln_2_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd);
|
||||
layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd);
|
||||
layer.c_attn_attn_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd);
|
||||
layer.c_attn_attn_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, 3*n_embd);
|
||||
|
||||
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.c_attn_proj_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_proj_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
|
||||
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd);
|
||||
layer.c_mlp_fc_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
|
||||
layer.c_mlp_fc_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, 4*n_embd);
|
||||
|
||||
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
|
||||
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.c_mlp_proj_w = ggml_v2_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
|
||||
layer.c_mlp_proj_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
// map by name
|
||||
model.tensors["model/h" + std::to_string(i) + "/ln_1/g"] = layer.ln_1_g;
|
||||
|
@ -238,10 +238,10 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
|
|||
const int n_mem = n_layer*n_ctx;
|
||||
const int n_elements = n_embd*n_mem;
|
||||
|
||||
model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements*1.5);
|
||||
model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements*1.5);
|
||||
model.memory_k = ggml_v2_new_tensor_1d(ctx, memory_type, n_elements*1.5);
|
||||
model.memory_v = ggml_v2_new_tensor_1d(ctx, memory_type, n_elements*1.5);
|
||||
|
||||
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
|
||||
const size_t memory_size = ggml_v2_nbytes(model.memory_k) + ggml_v2_nbytes(model.memory_v);
|
||||
|
||||
printf("%s: memory size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
|
||||
}
|
||||
|
@ -281,7 +281,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
|
|||
}
|
||||
|
||||
auto tensor = model.tensors[name.data()];
|
||||
if (ggml_nelements(tensor) != nelements) {
|
||||
if (ggml_v2_nelements(tensor) != nelements) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
|
||||
return ModelLoadResult::FAIL;
|
||||
}
|
||||
|
@ -294,29 +294,29 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
|
|||
|
||||
// for debugging
|
||||
if (0) {
|
||||
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
|
||||
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_v2_type_name(ggml_v2_type(ttype)), ggml_v2_nbytes(tensor)/1024.0/1024.0, ggml_v2_nbytes(tensor));
|
||||
}
|
||||
|
||||
const size_t bpe = ggml_type_size(ggml_type(ttype));
|
||||
const size_t bpe = ggml_v2_type_size(ggml_v2_type(ttype));
|
||||
|
||||
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
|
||||
if ((nelements*bpe)/ggml_v2_blck_size(tensor->type) != ggml_v2_nbytes(tensor)) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
|
||||
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
|
||||
__func__, name.data(), ggml_v2_nbytes(tensor), nelements*bpe);
|
||||
return ModelLoadResult::FAIL;
|
||||
}
|
||||
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_v2_nbytes(tensor));
|
||||
|
||||
// GPT-2 models share the WTE tensor as the LM head
|
||||
if (name == "model/wte" && has_lm_head == false) {
|
||||
memcpy(model.lm_head->data, tensor->data, ggml_nbytes(tensor));
|
||||
memcpy(model.lm_head->data, tensor->data, ggml_v2_nbytes(tensor));
|
||||
}
|
||||
|
||||
if (name == "model/lm_head") {
|
||||
has_lm_head = true;
|
||||
}
|
||||
|
||||
total_size += ggml_nbytes(tensor);
|
||||
total_size += ggml_v2_nbytes(tensor);
|
||||
}
|
||||
|
||||
printf("%s: model size = %8.2f MB\n", __func__, total_size/1024.0/1024.0);
|
||||
|
@ -341,18 +341,18 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g
|
|||
// for (int i = 0; i < n_gpu; ++i) {
|
||||
// const auto & layer = model.layers[i];
|
||||
|
||||
// ggml_cl_transform_tensor(layer.ln_1_g); vram_total += ggml_nbytes(layer.ln_1_g);
|
||||
// ggml_cl_transform_tensor(layer.ln_1_b); vram_total += ggml_nbytes(layer.ln_1_b);
|
||||
// ggml_cl_transform_tensor(layer.ln_2_g); vram_total += ggml_nbytes(layer.ln_2_g);
|
||||
// ggml_cl_transform_tensor(layer.ln_2_b); vram_total += ggml_nbytes(layer.ln_2_b);
|
||||
// ggml_cl_transform_tensor(layer.c_attn_attn_w); vram_total += ggml_nbytes(layer.c_attn_attn_w);
|
||||
// ggml_cl_transform_tensor(layer.c_attn_attn_b); vram_total += ggml_nbytes(layer.c_attn_attn_b);
|
||||
// ggml_cl_transform_tensor(layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
|
||||
// ggml_cl_transform_tensor(layer.c_attn_proj_b); vram_total += ggml_nbytes(layer.c_attn_proj_b);
|
||||
// ggml_cl_transform_tensor(layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
|
||||
// ggml_cl_transform_tensor(layer.c_mlp_fc_b); vram_total += ggml_nbytes(layer.c_mlp_fc_b);
|
||||
// ggml_cl_transform_tensor(layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
|
||||
// ggml_cl_transform_tensor(layer.c_mlp_proj_b); vram_total += ggml_nbytes(layer.c_mlp_proj_b);
|
||||
// ggml_v2_cl_transform_tensor(layer.ln_1_g); vram_total += ggml_v2_nbytes(layer.ln_1_g);
|
||||
// ggml_v2_cl_transform_tensor(layer.ln_1_b); vram_total += ggml_v2_nbytes(layer.ln_1_b);
|
||||
// ggml_v2_cl_transform_tensor(layer.ln_2_g); vram_total += ggml_v2_nbytes(layer.ln_2_g);
|
||||
// ggml_v2_cl_transform_tensor(layer.ln_2_b); vram_total += ggml_v2_nbytes(layer.ln_2_b);
|
||||
// ggml_v2_cl_transform_tensor(layer.c_attn_attn_w); vram_total += ggml_v2_nbytes(layer.c_attn_attn_w);
|
||||
// ggml_v2_cl_transform_tensor(layer.c_attn_attn_b); vram_total += ggml_v2_nbytes(layer.c_attn_attn_b);
|
||||
// ggml_v2_cl_transform_tensor(layer.c_attn_proj_w); vram_total += ggml_v2_nbytes(layer.c_attn_proj_w);
|
||||
// ggml_v2_cl_transform_tensor(layer.c_attn_proj_b); vram_total += ggml_v2_nbytes(layer.c_attn_proj_b);
|
||||
// ggml_v2_cl_transform_tensor(layer.c_mlp_fc_w); vram_total += ggml_v2_nbytes(layer.c_mlp_fc_w);
|
||||
// ggml_v2_cl_transform_tensor(layer.c_mlp_fc_b); vram_total += ggml_v2_nbytes(layer.c_mlp_fc_b);
|
||||
// ggml_v2_cl_transform_tensor(layer.c_mlp_proj_w); vram_total += ggml_v2_nbytes(layer.c_mlp_proj_w);
|
||||
// ggml_v2_cl_transform_tensor(layer.c_mlp_proj_b); vram_total += ggml_v2_nbytes(layer.c_mlp_proj_b);
|
||||
// }
|
||||
|
||||
// fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
|
||||
|
@ -417,45 +417,45 @@ bool gpt2_eval(
|
|||
}
|
||||
}
|
||||
|
||||
struct ggml_init_params params;
|
||||
struct ggml_v2_init_params params;
|
||||
params.mem_size = buf_size;
|
||||
params.mem_buffer = buf;
|
||||
params.no_alloc = false;
|
||||
|
||||
|
||||
struct ggml_context * ctx0 = ggml_init(params);
|
||||
struct ggml_cgraph gf = {};
|
||||
struct ggml_v2_context * ctx0 = ggml_v2_init(params);
|
||||
struct ggml_v2_cgraph gf = {};
|
||||
gf.n_threads = n_threads;
|
||||
|
||||
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
|
||||
memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
|
||||
struct ggml_v2_tensor * embd = ggml_v2_new_tensor_1d(ctx0, GGML_V2_TYPE_I32, N);
|
||||
memcpy(embd->data, embd_inp.data(), N*ggml_v2_element_size(embd));
|
||||
|
||||
struct ggml_tensor * position = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
|
||||
struct ggml_v2_tensor * position = ggml_v2_new_tensor_1d(ctx0, GGML_V2_TYPE_I32, N);
|
||||
for (int i = 0; i < N; ++i) {
|
||||
((int32_t *) position->data)[i] = n_past + i;
|
||||
}
|
||||
|
||||
// wte + wpe
|
||||
struct ggml_tensor * inpL =
|
||||
ggml_add(ctx0,
|
||||
ggml_get_rows(ctx0, model.wte, embd),
|
||||
ggml_get_rows(ctx0, model.wpe, position));
|
||||
struct ggml_v2_tensor * inpL =
|
||||
ggml_v2_add(ctx0,
|
||||
ggml_v2_get_rows(ctx0, model.wte, embd),
|
||||
ggml_v2_get_rows(ctx0, model.wpe, position));
|
||||
|
||||
for (int il = 0; il < n_layer; ++il) {
|
||||
struct ggml_tensor * cur;
|
||||
struct ggml_v2_tensor * cur;
|
||||
|
||||
// norm
|
||||
{
|
||||
// [ 768, N]
|
||||
cur = ggml_norm(ctx0, inpL);
|
||||
cur = ggml_v2_norm(ctx0, inpL);
|
||||
|
||||
// cur = ln_1_g*cur + ln_1_b
|
||||
// [ 768, N]
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_g, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_mul(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].ln_1_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
||||
ggml_v2_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
||||
}
|
||||
|
||||
// attn
|
||||
|
@ -467,104 +467,104 @@ bool gpt2_eval(
|
|||
// cur = attn_w*cur + attn_b
|
||||
// [2304, N]
|
||||
{
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
model.layers[il].c_attn_attn_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
// self-attention
|
||||
{
|
||||
struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd);
|
||||
struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd);
|
||||
struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd);
|
||||
struct ggml_v2_tensor * Qcur = ggml_v2_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0*sizeof(float)*n_embd);
|
||||
struct ggml_v2_tensor * Kcur = ggml_v2_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1*sizeof(float)*n_embd);
|
||||
struct ggml_v2_tensor * Vcur = ggml_v2_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2*sizeof(float)*n_embd);
|
||||
|
||||
// store key and value to memory
|
||||
if (N >= 1) {
|
||||
struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_tensor * v = ggml_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_v2_tensor * k = ggml_v2_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_v2_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_v2_tensor * v = ggml_v2_view_1d(ctx0, model.memory_v, N*n_embd, (ggml_v2_element_size(model.memory_v)*n_embd)*(il*n_ctx + n_past));
|
||||
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
|
||||
ggml_v2_build_forward_expand(&gf, ggml_v2_cpy(ctx0, Kcur, k));
|
||||
ggml_v2_build_forward_expand(&gf, ggml_v2_cpy(ctx0, Vcur, v));
|
||||
}
|
||||
|
||||
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
|
||||
// [64, N, 12]
|
||||
struct ggml_tensor * Q =
|
||||
ggml_permute(ctx0,
|
||||
ggml_cpy(ctx0,
|
||||
struct ggml_v2_tensor * Q =
|
||||
ggml_v2_permute(ctx0,
|
||||
ggml_v2_cpy(ctx0,
|
||||
Qcur,
|
||||
ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd/n_head, n_head, N)),
|
||||
ggml_v2_new_tensor_3d(ctx0, GGML_V2_TYPE_F32, n_embd/n_head, n_head, N)),
|
||||
0, 2, 1, 3);
|
||||
|
||||
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
|
||||
// [64, n_past + N, 12]
|
||||
struct ggml_tensor * K =
|
||||
ggml_permute(ctx0,
|
||||
ggml_reshape_3d(ctx0,
|
||||
ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
|
||||
struct ggml_v2_tensor * K =
|
||||
ggml_v2_permute(ctx0,
|
||||
ggml_v2_reshape_3d(ctx0,
|
||||
ggml_v2_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_v2_element_size(model.memory_k)*n_embd),
|
||||
n_embd/n_head, n_head, n_past + N),
|
||||
0, 2, 1, 3);
|
||||
|
||||
// GG: flash attention
|
||||
//struct ggml_tensor * V =
|
||||
// ggml_cpy(ctx0,
|
||||
// ggml_permute(ctx0,
|
||||
// ggml_reshape_3d(ctx0,
|
||||
// ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
|
||||
//struct ggml_v2_tensor * V =
|
||||
// ggml_v2_cpy(ctx0,
|
||||
// ggml_v2_permute(ctx0,
|
||||
// ggml_v2_reshape_3d(ctx0,
|
||||
// ggml_v2_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_v2_element_size(model.memory_v)*n_embd),
|
||||
// n_embd/n_head, n_head, n_past + N),
|
||||
// 1, 2, 0, 3),
|
||||
// ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head));
|
||||
// ggml_v2_new_tensor_3d(ctx0, GGML_V2_TYPE_F32, n_past + N, n_embd/n_head, n_head));
|
||||
|
||||
//struct ggml_tensor * KQV = ggml_flash_attn(ctx0, Q, K, V, true);
|
||||
//struct ggml_v2_tensor * KQV = ggml_v2_flash_attn(ctx0, Q, K, V, true);
|
||||
|
||||
// K * Q
|
||||
// [n_past + N, N, 12]
|
||||
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
|
||||
struct ggml_v2_tensor * KQ = ggml_v2_mul_mat(ctx0, K, Q);
|
||||
|
||||
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
||||
// [n_past + N, N, 12]
|
||||
struct ggml_tensor * KQ_scaled =
|
||||
ggml_scale_inplace(ctx0,
|
||||
struct ggml_v2_tensor * KQ_scaled =
|
||||
ggml_v2_scale_inplace(ctx0,
|
||||
KQ,
|
||||
ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
|
||||
ggml_v2_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
|
||||
);
|
||||
|
||||
// KQ_masked = mask_past(KQ_scaled)
|
||||
// [n_past + N, N, 12]
|
||||
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
|
||||
struct ggml_v2_tensor * KQ_masked = ggml_v2_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
|
||||
|
||||
// KQ = soft_max(KQ_masked)
|
||||
// [n_past + N, N, 12]
|
||||
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
|
||||
struct ggml_v2_tensor * KQ_soft_max = ggml_v2_soft_max_inplace(ctx0, KQ_masked);
|
||||
|
||||
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
|
||||
// [n_past + N, 64, 12]
|
||||
struct ggml_tensor * V_trans =
|
||||
ggml_cpy(ctx0,
|
||||
ggml_permute(ctx0,
|
||||
ggml_reshape_3d(ctx0,
|
||||
ggml_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_v)*n_embd),
|
||||
struct ggml_v2_tensor * V_trans =
|
||||
ggml_v2_cpy(ctx0,
|
||||
ggml_v2_permute(ctx0,
|
||||
ggml_v2_reshape_3d(ctx0,
|
||||
ggml_v2_view_1d(ctx0, model.memory_v, (n_past + N)*n_embd, il*n_ctx*ggml_v2_element_size(model.memory_v)*n_embd),
|
||||
n_embd/n_head, n_head, n_past + N),
|
||||
1, 2, 0, 3),
|
||||
ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd/n_head, n_head));
|
||||
ggml_v2_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd/n_head, n_head));
|
||||
|
||||
// KQV = transpose(V) * KQ_soft_max
|
||||
// [64, N, 12]
|
||||
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);
|
||||
struct ggml_v2_tensor * KQV = ggml_v2_mul_mat(ctx0, V_trans, KQ_soft_max);
|
||||
|
||||
// KQV_merged = KQV.permute(0, 2, 1, 3)
|
||||
// [64, 12, N]
|
||||
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
|
||||
struct ggml_v2_tensor * KQV_merged = ggml_v2_permute(ctx0, KQV, 0, 2, 1, 3);
|
||||
|
||||
// cur = KQV_merged.contiguous().view(n_embd, N)
|
||||
// [768, N]
|
||||
cur = ggml_cpy(ctx0,
|
||||
cur = ggml_v2_cpy(ctx0,
|
||||
KQV_merged,
|
||||
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
|
||||
ggml_v2_new_tensor_2d(ctx0, GGML_V2_TYPE_F32, n_embd, N));
|
||||
}
|
||||
|
||||
// projection
|
||||
|
@ -576,33 +576,33 @@ bool gpt2_eval(
|
|||
// cur = proj_w*cur + proj_b
|
||||
// [768, N]
|
||||
{
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
model.layers[il].c_attn_proj_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].c_attn_proj_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
// add the input
|
||||
cur = ggml_add(ctx0, cur, inpL);
|
||||
cur = ggml_v2_add(ctx0, cur, inpL);
|
||||
|
||||
struct ggml_tensor * inpFF = cur;
|
||||
struct ggml_v2_tensor * inpFF = cur;
|
||||
|
||||
// feed-forward network
|
||||
{
|
||||
// norm
|
||||
{
|
||||
cur = ggml_norm(ctx0, inpFF);
|
||||
cur = ggml_v2_norm(ctx0, inpFF);
|
||||
|
||||
// cur = ln_2_g*cur + ln_2_b
|
||||
// [ 768, N]
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].ln_2_g, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_mul(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].ln_2_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, model.layers[il].ln_2_b, cur));
|
||||
ggml_v2_repeat(ctx0, model.layers[il].ln_2_b, cur));
|
||||
}
|
||||
|
||||
// fully connected
|
||||
|
@ -613,17 +613,17 @@ bool gpt2_eval(
|
|||
//
|
||||
// cur = fc_w*cur + fc_b
|
||||
// [3072, N]
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
model.layers[il].c_mlp_fc_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
|
||||
cur);
|
||||
|
||||
// GELU activation
|
||||
// [3072, N]
|
||||
cur = ggml_gelu(ctx0, cur);
|
||||
cur = ggml_v2_gelu(ctx0, cur);
|
||||
|
||||
// projection
|
||||
// [ 768, 3072] - model.layers[il].c_mlp_proj_w
|
||||
|
@ -633,63 +633,63 @@ bool gpt2_eval(
|
|||
//
|
||||
// cur = proj_w*cur + proj_b
|
||||
// [768, N]
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
model.layers[il].c_mlp_proj_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
// input for next layer
|
||||
inpL = ggml_add(ctx0, cur, inpFF);
|
||||
inpL = ggml_v2_add(ctx0, cur, inpFF);
|
||||
}
|
||||
|
||||
// norm
|
||||
{
|
||||
// [ 768, N]
|
||||
inpL = ggml_norm(ctx0, inpL);
|
||||
inpL = ggml_v2_norm(ctx0, inpL);
|
||||
|
||||
// inpL = ln_f_g*inpL + ln_f_b
|
||||
// [ 768, N]
|
||||
inpL = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.ln_f_g, inpL),
|
||||
inpL = ggml_v2_add(ctx0,
|
||||
ggml_v2_mul(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.ln_f_g, inpL),
|
||||
inpL),
|
||||
ggml_repeat(ctx0, model.ln_f_b, inpL));
|
||||
ggml_v2_repeat(ctx0, model.ln_f_b, inpL));
|
||||
}
|
||||
|
||||
// inpL = WTE * inpL
|
||||
// [ 768, 50257] - model.lm_head
|
||||
// [ 768, N] - inpL
|
||||
inpL = ggml_mul_mat(ctx0, model.lm_head, inpL);
|
||||
inpL = ggml_v2_mul_mat(ctx0, model.lm_head, inpL);
|
||||
|
||||
// logits -> probs
|
||||
//inpL = ggml_soft_max_inplace(ctx0, inpL);
|
||||
//inpL = ggml_v2_soft_max_inplace(ctx0, inpL);
|
||||
|
||||
// run the computation
|
||||
ggml_build_forward_expand(&gf, inpL);
|
||||
ggml_graph_compute (ctx0, &gf);
|
||||
ggml_v2_build_forward_expand(&gf, inpL);
|
||||
ggml_v2_graph_compute (ctx0, &gf);
|
||||
|
||||
//if (n_past%100 == 0) {
|
||||
// ggml_graph_print (&gf);
|
||||
// ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
|
||||
// ggml_v2_graph_print (&gf);
|
||||
// ggml_v2_graph_dump_dot(&gf, NULL, "gpt-2.dot");
|
||||
//}
|
||||
|
||||
//embd_w.resize(n_vocab*N);
|
||||
//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
|
||||
//memcpy(embd_w.data(), ggml_v2_get_data(inpL), sizeof(float)*n_vocab*N);
|
||||
|
||||
// return result just for the last token
|
||||
embd_w.resize(n_vocab);
|
||||
memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
|
||||
memcpy(embd_w.data(), (float *) ggml_v2_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
|
||||
|
||||
if (mem_per_token == 0) {
|
||||
mem_per_token = ggml_used_mem(ctx0)/N;
|
||||
mem_per_token = ggml_v2_used_mem(ctx0)/N;
|
||||
}
|
||||
//printf("used_mem = %zu\n", ggml_used_mem(ctx0));
|
||||
//printf("used_mem = %zu\n", ggml_v2_used_mem(ctx0));
|
||||
|
||||
ggml_free(ctx0);
|
||||
ggml_v2_free(ctx0);
|
||||
|
||||
return true;
|
||||
}
|
|
@ -1,4 +1,4 @@
|
|||
#include "ggml.h"
|
||||
#include "ggml_v2.h"
|
||||
#include "otherarch.h"
|
||||
|
||||
#include "utils.h"
|
||||
|
@ -49,7 +49,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
|
||||
fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));
|
||||
|
||||
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
|
||||
const int32_t qntvr = hparams.ftype / GGML_V2_QNT_VERSION_FACTOR;
|
||||
|
||||
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
|
||||
printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
|
||||
|
@ -60,7 +60,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
printf("%s: ftype = %d\n", __func__, hparams.ftype);
|
||||
printf("%s: qntvr = %d\n", __func__, qntvr);
|
||||
|
||||
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
|
||||
hparams.ftype %= GGML_V2_QNT_VERSION_FACTOR;
|
||||
}
|
||||
|
||||
// load vocab
|
||||
|
@ -89,8 +89,8 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
|
||||
// for the big tensors, we have the option to store the data in 16-bit floats or quantized
|
||||
// in order to save memory and also to speed up the computation
|
||||
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype));
|
||||
if (wtype == GGML_TYPE_COUNT) {
|
||||
ggml_v2_type wtype = ggml_v2_ftype_to_ggml_v2_type((ggml_v2_ftype) (model.hparams.ftype));
|
||||
if (wtype == GGML_V2_TYPE_COUNT) {
|
||||
fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n",
|
||||
__func__, fname.c_str(), model.hparams.ftype);
|
||||
return ModelLoadResult::FAIL;
|
||||
|
@ -98,7 +98,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
|
||||
auto & ctx = model.ctx;
|
||||
|
||||
auto memory_type = GGML_TYPE_F16;
|
||||
auto memory_type = GGML_V2_TYPE_F16;
|
||||
|
||||
size_t ctx_size = 0;
|
||||
|
||||
|
@ -110,31 +110,31 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b
|
||||
ctx_size += n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // ln_f_g
|
||||
ctx_size += n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // ln_f_b
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // wte
|
||||
ctx_size += n_embd*n_vocab*ggml_v2_type_sizef(wtype); // wte
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // lmh_g
|
||||
ctx_size += n_vocab*ggml_type_sizef(GGML_TYPE_F32); // lmh_b
|
||||
ctx_size += n_embd*n_vocab*ggml_v2_type_sizef(wtype); // lmh_g
|
||||
ctx_size += n_vocab*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // lmh_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_1_g
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_1_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_q_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_k_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_v_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_attn_q_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_attn_k_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_attn_v_proj_w
|
||||
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_attn_proj_w
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w
|
||||
ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_mlp_fc_w
|
||||
ctx_size += n_layer*( 4*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_mlp_fc_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_mlp_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_mlp_proj_b
|
||||
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_v2_type_sizef(memory_type); // memory_k
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_v2_type_sizef(memory_type); // memory_v
|
||||
|
||||
ctx_size += (5 + 10*n_layer)*512; // object overhead
|
||||
|
||||
|
@ -143,15 +143,15 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
|
||||
// create the ggml context
|
||||
{
|
||||
struct ggml_init_params params;
|
||||
struct ggml_v2_init_params params;
|
||||
params.mem_size = ctx_size;
|
||||
params.mem_buffer = NULL;
|
||||
params.no_alloc = false;
|
||||
|
||||
|
||||
model.ctx = ggml_init(params);
|
||||
model.ctx = ggml_v2_init(params);
|
||||
if (!model.ctx) {
|
||||
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
|
||||
fprintf(stderr, "%s: ggml_v2_init() failed\n", __func__);
|
||||
return ModelLoadResult::FAIL;
|
||||
}
|
||||
}
|
||||
|
@ -166,13 +166,13 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
|
||||
model.layers.resize(n_layer);
|
||||
|
||||
model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
model.wte = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
|
||||
model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model.ln_f_g = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
model.ln_f_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
model.lmh_g = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
model.lmh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_vocab);
|
||||
model.lmh_g = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
model.lmh_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_vocab);
|
||||
|
||||
// map by name
|
||||
model.tensors["transformer.wte.weight"] = model.wte;
|
||||
|
@ -186,20 +186,20 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
for (int i = 0; i < n_layer; ++i) {
|
||||
auto & layer = model.layers[i];
|
||||
|
||||
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_1_g = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
layer.ln_1_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_attn_q_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_k_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_v_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_q_proj_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_k_proj_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_v_proj_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
|
||||
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_proj_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
|
||||
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
|
||||
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd);
|
||||
layer.c_mlp_fc_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
|
||||
layer.c_mlp_fc_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, 4*n_embd);
|
||||
|
||||
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
|
||||
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.c_mlp_proj_w = ggml_v2_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
|
||||
layer.c_mlp_proj_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
// map by name
|
||||
model.tensors["transformer.h." + std::to_string(i) + ".ln_1.weight"] = layer.ln_1_g;
|
||||
|
@ -230,10 +230,10 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
const int n_mem = n_layer*n_ctx;
|
||||
const int n_elements = n_embd*n_mem;
|
||||
|
||||
model.memory_k = ggml_new_tensor_1d(ctx, memory_type, n_elements);
|
||||
model.memory_v = ggml_new_tensor_1d(ctx, memory_type, n_elements);
|
||||
model.memory_k = ggml_v2_new_tensor_1d(ctx, memory_type, n_elements);
|
||||
model.memory_v = ggml_v2_new_tensor_1d(ctx, memory_type, n_elements);
|
||||
|
||||
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
|
||||
const size_t memory_size = ggml_v2_nbytes(model.memory_k) + ggml_v2_nbytes(model.memory_v);
|
||||
|
||||
printf("%s: memory_size = %8.2f MB, n_mem = %d\n", __func__, memory_size/1024.0/1024.0, n_mem);
|
||||
}
|
||||
|
@ -274,7 +274,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
}
|
||||
|
||||
auto tensor = model.tensors[name.data()];
|
||||
if (ggml_nelements(tensor) != nelements) {
|
||||
if (ggml_v2_nelements(tensor) != nelements) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
|
||||
return ModelLoadResult::FAIL;
|
||||
}
|
||||
|
@ -286,7 +286,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
if(tensor->ne[0]==ne[1] && tensor->ne[1]==ne[0] && should_transpose_layer(name))
|
||||
{
|
||||
printf("\nFound a transposed tensor. This could be an older or newer model. Retrying load...");
|
||||
ggml_free(ctx);
|
||||
ggml_v2_free(ctx);
|
||||
return ModelLoadResult::RETRY_LOAD;
|
||||
}
|
||||
else
|
||||
|
@ -300,21 +300,21 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
|
|||
|
||||
// for debugging
|
||||
if (0) {
|
||||
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
|
||||
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_v2_type_name(ggml_v2_type(ttype)), ggml_v2_nbytes(tensor)/1024.0/1024.0, ggml_v2_nbytes(tensor));
|
||||
}
|
||||
|
||||
const size_t bpe = ggml_type_size(ggml_type(ttype));
|
||||
const size_t bpe = ggml_v2_type_size(ggml_v2_type(ttype));
|
||||
|
||||
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
|
||||
if ((nelements*bpe)/ggml_v2_blck_size(tensor->type) != ggml_v2_nbytes(tensor)) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
|
||||
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
|
||||
__func__, name.data(), ggml_v2_nbytes(tensor), nelements*bpe);
|
||||
return ModelLoadResult::FAIL;
|
||||
}
|
||||
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_v2_nbytes(tensor));
|
||||
|
||||
//printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ttype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
|
||||
total_size += ggml_nbytes(tensor);
|
||||
//printf("%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ttype == 0 ? "float" : "f16", ggml_v2_nbytes(tensor)/1024.0/1024.0);
|
||||
total_size += ggml_v2_nbytes(tensor);
|
||||
if (++n_tensors % 8 == 0) {
|
||||
printf(".");
|
||||
fflush(stdout);
|
||||
|
@@ -344,16 +344,16 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
     // for (int i = 0; i < n_gpu; ++i) {
     //     const auto & layer = model.layers[i];

-    //     ggml_cl_transform_tensor(layer.ln_1_g); vram_total += ggml_nbytes(layer.ln_1_g);
-    //     ggml_cl_transform_tensor(layer.ln_1_b); vram_total += ggml_nbytes(layer.ln_1_b);
-    //     ggml_cl_transform_tensor(layer.c_attn_q_proj_w); vram_total += ggml_nbytes(layer.c_attn_q_proj_w);
-    //     ggml_cl_transform_tensor(layer.c_attn_k_proj_w); vram_total += ggml_nbytes(layer.c_attn_k_proj_w);
-    //     ggml_cl_transform_tensor(layer.c_attn_v_proj_w); vram_total += ggml_nbytes(layer.c_attn_v_proj_w);
-    //     ggml_cl_transform_tensor(layer.c_attn_proj_w); vram_total += ggml_nbytes(layer.c_attn_proj_w);
-    //     ggml_cl_transform_tensor(layer.c_mlp_fc_w); vram_total += ggml_nbytes(layer.c_mlp_fc_w);
-    //     ggml_cl_transform_tensor(layer.c_mlp_fc_b); vram_total += ggml_nbytes(layer.c_mlp_fc_b);
-    //     ggml_cl_transform_tensor(layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
-    //     ggml_cl_transform_tensor(layer.c_mlp_proj_b); vram_total += ggml_nbytes(layer.c_mlp_proj_b);
+    //     ggml_v2_cl_transform_tensor(layer.ln_1_g); vram_total += ggml_v2_nbytes(layer.ln_1_g);
+    //     ggml_v2_cl_transform_tensor(layer.ln_1_b); vram_total += ggml_v2_nbytes(layer.ln_1_b);
+    //     ggml_v2_cl_transform_tensor(layer.c_attn_q_proj_w); vram_total += ggml_v2_nbytes(layer.c_attn_q_proj_w);
+    //     ggml_v2_cl_transform_tensor(layer.c_attn_k_proj_w); vram_total += ggml_v2_nbytes(layer.c_attn_k_proj_w);
+    //     ggml_v2_cl_transform_tensor(layer.c_attn_v_proj_w); vram_total += ggml_v2_nbytes(layer.c_attn_v_proj_w);
+    //     ggml_v2_cl_transform_tensor(layer.c_attn_proj_w); vram_total += ggml_v2_nbytes(layer.c_attn_proj_w);
+    //     ggml_v2_cl_transform_tensor(layer.c_mlp_fc_w); vram_total += ggml_v2_nbytes(layer.c_mlp_fc_w);
+    //     ggml_v2_cl_transform_tensor(layer.c_mlp_fc_b); vram_total += ggml_v2_nbytes(layer.c_mlp_fc_b);
+    //     ggml_v2_cl_transform_tensor(layer.c_mlp_proj_w); vram_total += ggml_v2_nbytes(layer.c_mlp_proj_w);
+    //     ggml_v2_cl_transform_tensor(layer.c_mlp_proj_b); vram_total += ggml_v2_nbytes(layer.c_mlp_proj_b);
     // }

     // fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
@ -420,193 +420,193 @@ bool gptj_eval(
|
|||
}
|
||||
}
|
||||
|
||||
struct ggml_init_params params;
|
||||
struct ggml_v2_init_params params;
|
||||
params.mem_size = buf_size;
|
||||
params.mem_buffer = buf;
|
||||
params.no_alloc = false;
|
||||
|
||||
|
||||
struct ggml_context * ctx0 = ggml_init(params);
|
||||
struct ggml_cgraph gf = {};
|
||||
struct ggml_v2_context * ctx0 = ggml_v2_init(params);
|
||||
struct ggml_v2_cgraph gf = {};
|
||||
gf.n_threads = n_threads;
|
||||
|
||||
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
|
||||
memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
|
||||
struct ggml_v2_tensor * embd = ggml_v2_new_tensor_1d(ctx0, GGML_V2_TYPE_I32, N);
|
||||
memcpy(embd->data, embd_inp.data(), N*ggml_v2_element_size(embd));
|
||||
|
||||
// wte
|
||||
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd);
|
||||
struct ggml_v2_tensor * inpL = ggml_v2_get_rows(ctx0, model.wte, embd);
|
||||
|
||||
for (int il = 0; il < n_layer; ++il) {
|
||||
struct ggml_tensor * cur;
|
||||
struct ggml_v2_tensor * cur;
|
||||
|
||||
// norm
|
||||
{
|
||||
cur = ggml_norm(ctx0, inpL);
|
||||
cur = ggml_v2_norm(ctx0, inpL);
|
||||
|
||||
// cur = ln_1_g*cur + ln_1_b
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_g, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_mul(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].ln_1_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
||||
ggml_v2_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
||||
}
|
||||
|
||||
struct ggml_tensor * inpSA = cur;
|
||||
struct ggml_v2_tensor * inpSA = cur;
|
||||
|
||||
// self-attention
|
||||
{
|
||||
struct ggml_tensor * Qcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
|
||||
struct ggml_tensor * Kcur = ggml_rope_inplace(ctx0, ggml_reshape_3d(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
|
||||
struct ggml_v2_tensor * Qcur = ggml_v2_rope_inplace(ctx0, ggml_v2_reshape_3d(ctx0, ggml_v2_mul_mat(ctx0, model.layers[il].c_attn_q_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
|
||||
struct ggml_v2_tensor * Kcur = ggml_v2_rope_inplace(ctx0, ggml_v2_reshape_3d(ctx0, ggml_v2_mul_mat(ctx0, model.layers[il].c_attn_k_proj_w, cur), n_embd/n_head, n_head, N), n_past, n_rot, 0);
|
||||
|
||||
// store key and value to memory
|
||||
{
|
||||
struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_mul_mat(ctx0, model.layers[il].c_attn_v_proj_w, cur));
|
||||
struct ggml_v2_tensor * Vcur = ggml_v2_transpose(ctx0, ggml_v2_mul_mat(ctx0, model.layers[il].c_attn_v_proj_w, cur));
|
||||
|
||||
struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_tensor * v = ggml_view_2d(ctx0, model.memory_v, N, n_embd,
|
||||
( n_ctx)*ggml_element_size(model.memory_v),
|
||||
(il*n_ctx)*ggml_element_size(model.memory_v)*n_embd + n_past*ggml_element_size(model.memory_v));
|
||||
struct ggml_v2_tensor * k = ggml_v2_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_v2_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_v2_tensor * v = ggml_v2_view_2d(ctx0, model.memory_v, N, n_embd,
|
||||
( n_ctx)*ggml_v2_element_size(model.memory_v),
|
||||
(il*n_ctx)*ggml_v2_element_size(model.memory_v)*n_embd + n_past*ggml_v2_element_size(model.memory_v));
|
||||
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
|
||||
ggml_v2_build_forward_expand(&gf, ggml_v2_cpy(ctx0, Kcur, k));
|
||||
ggml_v2_build_forward_expand(&gf, ggml_v2_cpy(ctx0, Vcur, v));
|
||||
}
|
||||
|
||||
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * Q =
|
||||
ggml_permute(ctx0,
|
||||
struct ggml_v2_tensor * Q =
|
||||
ggml_v2_permute(ctx0,
|
||||
Qcur,
|
||||
0, 2, 1, 3);
|
||||
|
||||
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * K =
|
||||
ggml_permute(ctx0,
|
||||
ggml_reshape_3d(ctx0,
|
||||
ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
|
||||
struct ggml_v2_tensor * K =
|
||||
ggml_v2_permute(ctx0,
|
||||
ggml_v2_reshape_3d(ctx0,
|
||||
ggml_v2_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_v2_element_size(model.memory_k)*n_embd),
|
||||
n_embd/n_head, n_head, n_past + N),
|
||||
0, 2, 1, 3);
|
||||
|
||||
// K * Q
|
||||
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
|
||||
struct ggml_v2_tensor * KQ = ggml_v2_mul_mat(ctx0, K, Q);
|
||||
|
||||
// KQ_scaled = KQ / sqrt(n_embd/n_head)
|
||||
struct ggml_tensor * KQ_scaled =
|
||||
ggml_scale_inplace(ctx0,
|
||||
struct ggml_v2_tensor * KQ_scaled =
|
||||
ggml_v2_scale_inplace(ctx0,
|
||||
KQ,
|
||||
ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
|
||||
ggml_v2_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
|
||||
);
|
||||
|
||||
// KQ_masked = mask_past(KQ_scaled)
|
||||
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
|
||||
struct ggml_v2_tensor * KQ_masked = ggml_v2_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
|
||||
|
||||
// KQ = soft_max(KQ_masked)
|
||||
struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
|
||||
struct ggml_v2_tensor * KQ_soft_max = ggml_v2_soft_max_inplace(ctx0, KQ_masked);
|
||||
|
||||
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
|
||||
struct ggml_tensor * V =
|
||||
ggml_view_3d(ctx0, model.memory_v,
|
||||
struct ggml_v2_tensor * V =
|
||||
ggml_v2_view_3d(ctx0, model.memory_v,
|
||||
n_past + N, n_embd/n_head, n_head,
|
||||
n_ctx*ggml_element_size(model.memory_v),
|
||||
n_ctx*ggml_element_size(model.memory_v)*n_embd/n_head,
|
||||
il*n_ctx*ggml_element_size(model.memory_v)*n_embd);
|
||||
n_ctx*ggml_v2_element_size(model.memory_v),
|
||||
n_ctx*ggml_v2_element_size(model.memory_v)*n_embd/n_head,
|
||||
il*n_ctx*ggml_v2_element_size(model.memory_v)*n_embd);
|
||||
|
||||
// KQV = transpose(V) * KQ_soft_max
|
||||
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
|
||||
struct ggml_v2_tensor * KQV = ggml_v2_mul_mat(ctx0, V, KQ_soft_max);
|
||||
|
||||
// KQV_merged = KQV.permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
|
||||
struct ggml_v2_tensor * KQV_merged = ggml_v2_permute(ctx0, KQV, 0, 2, 1, 3);
|
||||
|
||||
// cur = KQV_merged.contiguous().view(n_embd, N)
|
||||
cur = ggml_cpy(ctx0,
|
||||
cur = ggml_v2_cpy(ctx0,
|
||||
KQV_merged,
|
||||
ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
|
||||
ggml_v2_new_tensor_2d(ctx0, GGML_V2_TYPE_F32, n_embd, N));
|
||||
|
||||
// projection (no bias)
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
model.layers[il].c_attn_proj_w,
|
||||
cur);
|
||||
}
|
||||
|
||||
struct ggml_tensor * inpFF = cur;
|
||||
struct ggml_v2_tensor * inpFF = cur;
|
||||
|
||||
// feed-forward network
|
||||
// this is independent of the self-attention result, so it could be done in parallel to the self-attention
|
||||
{
|
||||
// note here we pass inpSA instead of cur
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
model.layers[il].c_mlp_fc_w,
|
||||
inpSA);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].c_mlp_fc_b, cur),
|
||||
cur);
|
||||
|
||||
// GELU activation
|
||||
cur = ggml_gelu(ctx0, cur);
|
||||
cur = ggml_v2_gelu(ctx0, cur);
|
||||
|
||||
// projection
|
||||
// cur = proj_w*cur + proj_b
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
model.layers[il].c_mlp_proj_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].c_mlp_proj_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
// self-attention + FF
|
||||
cur = ggml_add(ctx0, cur, inpFF);
|
||||
cur = ggml_v2_add(ctx0, cur, inpFF);
|
||||
|
||||
// input for next layer
|
||||
inpL = ggml_add(ctx0, cur, inpL);
|
||||
inpL = ggml_v2_add(ctx0, cur, inpL);
|
||||
}
|
||||
|
||||
// norm
|
||||
{
|
||||
inpL = ggml_norm(ctx0, inpL);
|
||||
inpL = ggml_v2_norm(ctx0, inpL);
|
||||
|
||||
// inpL = ln_f_g*inpL + ln_f_b
|
||||
inpL = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.ln_f_g, inpL),
|
||||
inpL = ggml_v2_add(ctx0,
|
||||
ggml_v2_mul(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.ln_f_g, inpL),
|
||||
inpL),
|
||||
ggml_repeat(ctx0, model.ln_f_b, inpL));
|
||||
ggml_v2_repeat(ctx0, model.ln_f_b, inpL));
|
||||
}
|
||||
|
||||
// lm_head
|
||||
{
|
||||
inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL);
|
||||
inpL = ggml_v2_mul_mat(ctx0, model.lmh_g, inpL);
|
||||
|
||||
inpL = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.lmh_b, inpL),
|
||||
inpL = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.lmh_b, inpL),
|
||||
inpL);
|
||||
}
|
||||
|
||||
// logits -> probs
|
||||
//inpL = ggml_soft_max_inplace(ctx0, inpL);
|
||||
//inpL = ggml_v2_soft_max_inplace(ctx0, inpL);
|
||||
|
||||
// run the computation
|
||||
ggml_build_forward_expand(&gf, inpL);
|
||||
ggml_graph_compute (ctx0, &gf);
|
||||
ggml_v2_build_forward_expand(&gf, inpL);
|
||||
ggml_v2_graph_compute (ctx0, &gf);
|
||||
|
||||
//if (n_past%100 == 0) {
|
||||
// ggml_graph_print (&gf);
|
||||
// ggml_graph_dump_dot(&gf, NULL, "gpt-j.dot");
|
||||
// ggml_v2_graph_print (&gf);
|
||||
// ggml_v2_graph_dump_dot(&gf, NULL, "gpt-j.dot");
|
||||
//}
|
||||
|
||||
//embd_w.resize(n_vocab*N);
|
||||
//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
|
||||
//memcpy(embd_w.data(), ggml_v2_get_data(inpL), sizeof(float)*n_vocab*N);
|
||||
|
||||
// return result for just the last token
|
||||
embd_w.resize(n_vocab);
|
||||
memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
|
||||
memcpy(embd_w.data(), (float *) ggml_v2_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
|
||||
|
||||
if (mem_per_token == 0) {
|
||||
mem_per_token = ggml_used_mem(ctx0)/N;
|
||||
mem_per_token = ggml_v2_used_mem(ctx0)/N;
|
||||
}
|
||||
//printf("used_mem = %zu\n", ggml_used_mem(ctx0));
|
||||
//printf("used_mem = %zu\n", ggml_v2_used_mem(ctx0));
|
||||
|
||||
ggml_free(ctx0);
|
||||
ggml_v2_free(ctx0);
|
||||
|
||||
return true;
|
||||
}
|
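The gptj_eval hunk above is a mechanical ggml_* to ggml_v2_* rename; the attention graph it builds is unchanged. For reference only (this summary is editorial, not part of the commit), with d = n_embd/n_head the ops assembled above compute standard scaled dot-product attention per head:

    KQ        = K * Q^T
    KQ_scaled = KQ / sqrt(d)
    KQ_masked = mask_past(KQ_scaled)      // causal mask over positions beyond n_past
    KQV       = softmax(KQ_masked) * V    // then permuted and copied back to an (n_embd, N) tensor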
(File diff suppressed because it is too large)
@@ -1,29 +1,29 @@
-#ifndef LLAMA_H
-#define LLAMA_H
+#ifndef LLAMA_V2_H
+#define LLAMA_V2_H

 #include <stddef.h>
 #include <stdint.h>
 #include <stdbool.h>

-#ifdef LLAMA_SHARED
+#ifdef LLAMA_V2_SHARED
 #    if defined(_WIN32) && !defined(__MINGW32__)
-#        ifdef LLAMA_BUILD
-#            define LLAMA_API __declspec(dllexport)
+#        ifdef LLAMA_V2_BUILD
+#            define LLAMA_V2_API __declspec(dllexport)
 #        else
-#            define LLAMA_API __declspec(dllimport)
+#            define LLAMA_V2_API __declspec(dllimport)
 #        endif
 #    else
-#        define LLAMA_API __attribute__ ((visibility ("default")))
+#        define LLAMA_V2_API __attribute__ ((visibility ("default")))
 #    endif
 #else
-#    define LLAMA_API
+#    define LLAMA_V2_API
 #endif

-#define LLAMA_FILE_VERSION 3
-#define LLAMA_FILE_MAGIC 'ggjt'
-#define LLAMA_FILE_MAGIC_UNVERSIONED 'ggml'
-#define LLAMA_SESSION_MAGIC 'ggsn'
-#define LLAMA_SESSION_VERSION 1
+#define LLAMA_V2_FILE_VERSION 3
+#define LLAMA_V2_FILE_MAGIC 'ggjt'
+#define LLAMA_V2_FILE_MAGIC_UNVERSIONED 'ggml'
+#define LLAMA_V2_SESSION_MAGIC 'ggsn'
+#define LLAMA_V2_SESSION_VERSION 1

 #ifdef __cplusplus
 extern "C" {
@@ -35,78 +35,78 @@ extern "C" {
     // TODO: show sample usage
     //

-    struct llama_context;
+    struct llama_v2_context;

-    typedef int llama_token;
+    typedef int llama_v2_token;

-    typedef struct llama_token_data {
-        llama_token id; // token id
+    typedef struct llama_v2_token_data {
+        llama_v2_token id; // token id
         float logit; // log-odds of the token
         float p; // probability of the token
-    } llama_token_data;
+    } llama_v2_token_data;

-    typedef struct llama_token_data_array {
-        llama_token_data * data;
+    typedef struct llama_v2_token_data_array {
+        llama_v2_token_data * data;
         size_t size;
         bool sorted;
-    } llama_token_data_array;
+    } llama_v2_token_data_array;

-    typedef void (*llama_progress_callback)(float progress, void *ctx);
+    typedef void (*llama_v2_progress_callback)(float progress, void *ctx);

-    struct llama_context_params {
+    struct llama_v2_context_params {
         int n_ctx; // text context
         int n_gpu_layers; // number of layers to store in VRAM
         int seed; // RNG seed, -1 for random

         bool f16_kv; // use fp16 for KV cache
-        bool logits_all; // the llama_eval() call computes all logits, not just the last one
+        bool logits_all; // the llama_v2_eval() call computes all logits, not just the last one
         bool vocab_only; // only load the vocabulary, no weights
         bool use_mmap; // use mmap if possible
         bool use_mlock; // force system to keep model in RAM
         bool embedding; // embedding mode only

         // called with a progress value between 0 and 1, pass NULL to disable
-        llama_progress_callback progress_callback;
+        llama_v2_progress_callback progress_callback;
         // context pointer passed to the progress callback
         void * progress_callback_user_data;
     };

     // model file types
-    enum llama_ftype {
-        LLAMA_FTYPE_ALL_F32 = 0,
-        LLAMA_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
-        LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q4_3 = 6, // except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
-        LLAMA_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
+    enum llama_v2_ftype {
+        LLAMA_V2_FTYPE_ALL_F32 = 0,
+        LLAMA_V2_FTYPE_MOSTLY_F16 = 1, // except 1d tensors
+        LLAMA_V2_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
+        LLAMA_V2_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
+        LLAMA_V2_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
+        LLAMA_V2_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors
+        LLAMA_V2_FTYPE_MOSTLY_Q4_3 = 6, // except 1d tensors
+        LLAMA_V2_FTYPE_MOSTLY_Q8_0 = 7, // except 1d tensors
+        LLAMA_V2_FTYPE_MOSTLY_Q5_0 = 8, // except 1d tensors
+        LLAMA_V2_FTYPE_MOSTLY_Q5_1 = 9, // except 1d tensors
     };

-    LLAMA_API struct llama_context_params llama_context_default_params();
+    LLAMA_V2_API struct llama_v2_context_params llama_v2_context_default_params();

-    LLAMA_API bool llama_mmap_supported();
-    LLAMA_API bool llama_mlock_supported();
+    LLAMA_V2_API bool llama_v2_mmap_supported();
+    LLAMA_V2_API bool llama_v2_mlock_supported();

     // Various functions for loading a ggml llama model.
     // Allocate (almost) all memory needed for the model.
     // Return NULL on failure
-    LLAMA_API struct llama_context * llama_init_from_file(
+    LLAMA_V2_API struct llama_v2_context * llama_v2_init_from_file(
             const char * path_model,
-            struct llama_context_params params);
+            struct llama_v2_context_params params);

     // Frees all allocated memory
-    LLAMA_API void llama_free(struct llama_context * ctx);
+    LLAMA_V2_API void llama_v2_free(struct llama_v2_context * ctx);

     // TODO: not great API - very likely to change
     // Returns 0 on success
     // nthread - how many threads to use. If <=0, will use std::thread::hardware_concurrency(), else the number given
-    LLAMA_API int llama_model_quantize(
+    LLAMA_V2_API int llama_v2_model_quantize(
             const char * fname_inp,
             const char * fname_out,
-            enum llama_ftype ftype,
+            enum llama_v2_ftype ftype,
             int nthread);

     // Apply a LoRA adapter to a loaded model
@@ -115,42 +115,42 @@ extern "C" {
     // The model needs to be reloaded before applying a new adapter, otherwise the adapter
     // will be applied on top of the previous one
     // Returns 0 on success
-    LLAMA_API int llama_apply_lora_from_file(
-            struct llama_context * ctx,
+    LLAMA_V2_API int llama_v2_apply_lora_from_file(
+            struct llama_v2_context * ctx,
             const char * path_lora,
             const char * path_base_model,
            int n_threads);

     // Returns the number of tokens in the KV cache
-    LLAMA_API int llama_get_kv_cache_token_count(const struct llama_context * ctx);
+    LLAMA_V2_API int llama_v2_get_kv_cache_token_count(const struct llama_v2_context * ctx);

     // Sets the current rng seed.
-    LLAMA_API void llama_set_rng_seed(struct llama_context * ctx, int seed);
+    LLAMA_V2_API void llama_v2_set_rng_seed(struct llama_v2_context * ctx, int seed);

     // Returns the maximum size in bytes of the state (rng, logits, embedding
     // and kv_cache) - will often be smaller after compacting tokens
-    LLAMA_API size_t llama_get_state_size(const struct llama_context * ctx);
+    LLAMA_V2_API size_t llama_v2_get_state_size(const struct llama_v2_context * ctx);

     // Copies the state to the specified destination address.
     // Destination needs to have allocated enough memory.
     // Returns the number of bytes copied
-    LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst);
+    LLAMA_V2_API size_t llama_v2_copy_state_data(struct llama_v2_context * ctx, uint8_t * dst);

     // Set the state reading from the specified address
     // Returns the number of bytes read
-    LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src);
+    LLAMA_V2_API size_t llama_v2_set_state_data(struct llama_v2_context * ctx, const uint8_t * src);

     // Save/load session file
-    LLAMA_API bool llama_load_session_file(struct llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out);
-    LLAMA_API bool llama_save_session_file(struct llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count);
+    LLAMA_V2_API bool llama_v2_load_session_file(struct llama_v2_context * ctx, const char * path_session, llama_v2_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out);
+    LLAMA_V2_API bool llama_v2_save_session_file(struct llama_v2_context * ctx, const char * path_session, const llama_v2_token * tokens, size_t n_token_count);

     // Run the llama inference to obtain the logits and probabilities for the next token.
     // tokens + n_tokens is the provided batch of new tokens to process
     // n_past is the number of tokens to use from previous eval calls
     // Returns 0 on success
-    LLAMA_API int llama_eval(
-            struct llama_context * ctx,
-            const llama_token * tokens,
+    LLAMA_V2_API int llama_v2_eval(
+            struct llama_v2_context * ctx,
            const llama_v2_token * tokens,
             int n_tokens,
             int n_past,
             int n_threads);
@@ -160,101 +160,104 @@ extern "C" {
     // Returns the number of tokens on success, no more than n_max_tokens
     // Returns a negative number on failure - the number of tokens that would have been returned
     // TODO: not sure if correct
-    LLAMA_API int llama_tokenize(
-            struct llama_context * ctx,
+    LLAMA_V2_API int llama_v2_tokenize(
+            struct llama_v2_context * ctx,
             const char * text,
-            llama_token * tokens,
+            llama_v2_token * tokens,
             int n_max_tokens,
             bool add_bos);

-    LLAMA_API int llama_n_vocab(const struct llama_context * ctx);
-    LLAMA_API int llama_n_ctx (const struct llama_context * ctx);
-    LLAMA_API int llama_n_embd (const struct llama_context * ctx);
+    std::vector<llama_v2_token> legacy_llama_v2_tokenize(struct llama_v2_context * ctx, const std::string & text, bool add_bos);

-    // Token logits obtained from the last call to llama_eval()
+    LLAMA_V2_API int llama_v2_n_vocab(const struct llama_v2_context * ctx);
+    LLAMA_V2_API int llama_v2_n_ctx (const struct llama_v2_context * ctx);
+    LLAMA_V2_API int llama_v2_n_embd (const struct llama_v2_context * ctx);
+
+    // Token logits obtained from the last call to llama_v2_eval()
     // The logits for the last token are stored in the last row
     // Can be mutated in order to change the probabilities of the next token
     // Rows: n_tokens
     // Cols: n_vocab
-    LLAMA_API float * llama_get_logits(struct llama_context * ctx);
+    LLAMA_V2_API float * llama_v2_get_logits(struct llama_v2_context * ctx);

     // Get the embeddings for the input
     // shape: [n_embd] (1-dimensional)
-    LLAMA_API float * llama_get_embeddings(struct llama_context * ctx);
+    LLAMA_V2_API float * llama_v2_get_embeddings(struct llama_v2_context * ctx);

     // Token Id -> String. Uses the vocabulary in the provided context
-    LLAMA_API const char * llama_token_to_str(const struct llama_context * ctx, llama_token token);
+    LLAMA_V2_API const char * llama_v2_token_to_str(const struct llama_v2_context * ctx, llama_v2_token token);

     // Special tokens
-    LLAMA_API llama_token llama_token_bos();
-    LLAMA_API llama_token llama_token_eos();
-    LLAMA_API llama_token llama_token_nl();
+    LLAMA_V2_API llama_v2_token llama_v2_token_bos();
+    LLAMA_V2_API llama_v2_token llama_v2_token_eos();
+    LLAMA_V2_API llama_v2_token llama_v2_token_nl();

     // Sampling functions

     /// @details Repetition penalty described in CTRL academic paper https://arxiv.org/abs/1909.05858, with negative logit fix.
-    LLAMA_API void llama_sample_repetition_penalty(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float penalty);
+    LLAMA_V2_API void llama_v2_sample_repetition_penalty(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates, const llama_v2_token * last_tokens, size_t last_tokens_size, float penalty);

     /// @details Frequency and presence penalties described in OpenAI API https://platform.openai.com/docs/api-reference/parameter-details.
-    LLAMA_API void llama_sample_frequency_and_presence_penalties(struct llama_context * ctx, llama_token_data_array * candidates, const llama_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);
+    LLAMA_V2_API void llama_v2_sample_frequency_and_presence_penalties(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates, const llama_v2_token * last_tokens, size_t last_tokens_size, float alpha_frequency, float alpha_presence);

     /// @details Sorts candidate tokens by their logits in descending order and calculate probabilities based on logits.
-    LLAMA_API void llama_sample_softmax(struct llama_context * ctx, llama_token_data_array * candidates);
+    LLAMA_V2_API void llama_v2_sample_softmax(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates);

     /// @details Top-K sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
-    LLAMA_API void llama_sample_top_k(struct llama_context * ctx, llama_token_data_array * candidates, int k, size_t min_keep);
+    LLAMA_V2_API void llama_v2_sample_top_k(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates, int k, size_t min_keep);

     /// @details Nucleus sampling described in academic paper "The Curious Case of Neural Text Degeneration" https://arxiv.org/abs/1904.09751
-    LLAMA_API void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
+    LLAMA_V2_API void llama_v2_sample_top_p(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates, float p, size_t min_keep);

     /// @details Tail Free Sampling described in https://www.trentonbricken.com/Tail-Free-Sampling/.
-    LLAMA_API void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array * candidates, float z, size_t min_keep);
+    LLAMA_V2_API void llama_v2_sample_tail_free(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates, float z, size_t min_keep);

     /// @details Locally Typical Sampling implementation described in the paper https://arxiv.org/abs/2202.00666.
-    LLAMA_API void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * candidates, float p, size_t min_keep);
-    LLAMA_API void llama_sample_temperature(struct llama_context * ctx, llama_token_data_array * candidates, float temp);
+    LLAMA_V2_API void llama_v2_sample_typical(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates, float p, size_t min_keep);
+    LLAMA_V2_API void llama_v2_sample_temperature(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates, float temp);

     /// @details Mirostat 1.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
-    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
+    /// @param candidates A vector of `llama_v2_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
     /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
     /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
     /// @param m The number of tokens considered in the estimation of `s_hat`. This is an arbitrary value that is used to calculate `s_hat`, which in turn helps to calculate the value of `k`. In the paper, they use `m = 100`, but you can experiment with different values to see how it affects the performance of the algorithm.
     /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
-    LLAMA_API llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, int m, float * mu);
+    LLAMA_V2_API llama_v2_token llama_v2_sample_token_mirostat(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates, float tau, float eta, int m, float * mu);

     /// @details Mirostat 2.0 algorithm described in the paper https://arxiv.org/abs/2007.14966. Uses tokens instead of words.
-    /// @param candidates A vector of `llama_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
+    /// @param candidates A vector of `llama_v2_token_data` containing the candidate tokens, their probabilities (p), and log-odds (logit) for the current position in the generated text.
     /// @param tau The target cross-entropy (or surprise) value you want to achieve for the generated text. A higher value corresponds to more surprising or less predictable text, while a lower value corresponds to less surprising or more predictable text.
     /// @param eta The learning rate used to update `mu` based on the error between the target and observed surprisal of the sampled word. A larger learning rate will cause `mu` to be updated more quickly, while a smaller learning rate will result in slower updates.
     /// @param mu Maximum cross-entropy. This value is initialized to be twice the target cross-entropy (`2 * tau`) and is updated in the algorithm based on the error between the target and observed surprisal.
-    LLAMA_API llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu);
+    LLAMA_V2_API llama_v2_token llama_v2_sample_token_mirostat_v2(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates, float tau, float eta, float * mu);

     /// @details Selects the token with the highest probability.
-    LLAMA_API llama_token llama_sample_token_greedy(struct llama_context * ctx, llama_token_data_array * candidates);
+    LLAMA_V2_API llama_v2_token llama_v2_sample_token_greedy(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates);

     /// @details Randomly selects a token from the candidates based on their probabilities.
-    LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);
+    LLAMA_V2_API llama_v2_token llama_v2_sample_token(struct llama_v2_context * ctx, llama_v2_token_data_array * candidates);

     // Performance information
-    LLAMA_API void llama_print_timings(struct llama_context * ctx);
-    LLAMA_API void llama_reset_timings(struct llama_context * ctx);
+    LLAMA_V2_API void llama_v2_print_timings(struct llama_v2_context * ctx);
+    LLAMA_V2_API void llama_v2_reset_timings(struct llama_v2_context * ctx);

     // Print system information
-    LLAMA_API const char * llama_print_system_info(void);
+    LLAMA_V2_API const char * llama_v2_print_system_info(void);

 #ifdef __cplusplus
 }
 #endif

 // Internal API to be implemented by llama.cpp and used by tests/benchmarks only
-#ifdef LLAMA_API_INTERNAL
+#ifdef LLAMA_V2_API_INTERNAL

 #include <vector>
 #include <string>
-struct ggml_tensor;
+struct ggml_v2_tensor;

-std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);
+std::vector<std::pair<std::string, struct ggml_v2_tensor *>>& llama_v2_internal_get_tensor_map(struct llama_v2_context * ctx);

 #endif

-#endif // LLAMA_H
+#endif // LLAMA_V2_H
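Below is a minimal usage sketch of the renamed llama_v2_* API declared above. It is added for orientation only and is not part of the commit; it assumes the header is saved as "llama_v2.h" next to the caller, and "models/llama-7b-q4_0.bin" is a hypothetical path to a legacy GGJT model file. Every call it makes is taken verbatim from the declarations above.

    // sketch: load a legacy model with the llama_v2 API, evaluate a prompt, greedily pick one token
    #include "llama_v2.h"
    #include <cstdio>
    #include <vector>

    int main() {
        llama_v2_context_params params = llama_v2_context_default_params();
        params.n_ctx = 512;

        llama_v2_context * ctx = llama_v2_init_from_file("models/llama-7b-q4_0.bin", params); // hypothetical path
        if (!ctx) { return 1; }

        // tokenize the prompt (add_bos = true) and evaluate it in a single batch
        std::vector<llama_v2_token> tokens(params.n_ctx);
        const int n = llama_v2_tokenize(ctx, "Hello", tokens.data(), (int) tokens.size(), true);
        if (n < 0 || llama_v2_eval(ctx, tokens.data(), n, 0, 4) != 0) {
            llama_v2_free(ctx);
            return 1;
        }

        // build a candidate array from the last-position logits and sample greedily
        const int n_vocab = llama_v2_n_vocab(ctx);
        float * logits = llama_v2_get_logits(ctx);
        std::vector<llama_v2_token_data> cand;
        cand.reserve(n_vocab);
        for (llama_v2_token id = 0; id < n_vocab; ++id) {
            cand.push_back({id, logits[id], 0.0f});
        }
        llama_v2_token_data_array cand_arr = { cand.data(), cand.size(), false };
        const llama_v2_token next = llama_v2_sample_token_greedy(ctx, &cand_arr);
        printf("next token: %d (%s)\n", next, llama_v2_token_to_str(ctx, next));

        llama_v2_free(ctx);
        return 0;
    }

The call sequence is identical to the original llama.h API; only the llama_v2_/LLAMA_V2_ prefixes differ, which is what lets this legacy copy coexist with the current llama.cpp in the same binary.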
@@ -1,4 +1,4 @@
-#include "ggml.h"
+#include "ggml_v2.h"
 #include "otherarch.h"

 #include "utils.h"
@@ -55,7 +55,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
        }
        fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));

-       const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
+       const int32_t qntvr = hparams.ftype / GGML_V2_QNT_VERSION_FACTOR;

        printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
        printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
@@ -67,7 +67,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
        printf("%s: ftype = %d\n", __func__, hparams.ftype);
        printf("%s: qntvr = %d\n", __func__, qntvr);

-       hparams.ftype %= GGML_QNT_VERSION_FACTOR;
+       hparams.ftype %= GGML_V2_QNT_VERSION_FACTOR;
    }

    // load vocab
@@ -89,8 +89,8 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &

    // for the big tensors, we have the option to store the data in 16-bit floats or quantized
    // in order to save memory and also to speed up the computation
-   ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype) (model.hparams.ftype));
-   if (wtype == GGML_TYPE_COUNT) {
+   ggml_v2_type wtype = ggml_v2_ftype_to_ggml_v2_type((ggml_v2_ftype) (model.hparams.ftype));
+   if (wtype == GGML_V2_TYPE_COUNT) {
        fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n",
                __func__, fname.c_str(), model.hparams.ftype);
        return ModelLoadResult::FAIL;
@ -108,34 +108,34 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
|
|||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_g
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // ln_f_b
|
||||
ctx_size += n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // ln_f_g
|
||||
ctx_size += n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // ln_f_b
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // wte
|
||||
ctx_size += n_embd*n_vocab*ggml_v2_type_sizef(wtype); // wte
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(wtype); // lmh_g
|
||||
//ctx_size += n_vocab*ggml_type_sizef(GGML_TYPE_F32); // lmh_b
|
||||
ctx_size += n_embd*n_vocab*ggml_v2_type_sizef(wtype); // lmh_g
|
||||
//ctx_size += n_vocab*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // lmh_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_1_b
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_1_g
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_1_b
|
||||
|
||||
ctx_size += n_layer*(3*n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_attn_w
|
||||
ctx_size += n_layer*( 3*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_attn_b
|
||||
ctx_size += n_layer*(3*n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_attn_attn_w
|
||||
ctx_size += n_layer*( 3*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_attn_attn_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // c_attn_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_attn_proj_b
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_attn_proj_w
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_attn_proj_b
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_g
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ln_2_b
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_2_g
|
||||
ctx_size += n_layer*(n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // ln_2_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_fc_w
|
||||
ctx_size += n_layer*( 4*n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_fc_b
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_mlp_fc_w
|
||||
ctx_size += n_layer*( 4*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_mlp_fc_b
|
||||
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_type_sizef(wtype)); // c_mlp_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_type_sizef(GGML_TYPE_F32)); // c_mlp_proj_b
|
||||
ctx_size += n_layer*(4*n_embd*n_embd*ggml_v2_type_sizef(wtype)); // c_mlp_proj_w
|
||||
ctx_size += n_layer*( n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32)); // c_mlp_proj_b
|
||||
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_k
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(GGML_TYPE_F32); // memory_v
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // memory_k
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_v2_type_sizef(GGML_V2_TYPE_F32); // memory_v
|
||||
|
||||
ctx_size += (6 + 16*n_layer)*512; // object overhead
|
||||
|
||||
|
@ -144,14 +144,14 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
|
|||
|
||||
// create the ggml context
|
||||
{
|
||||
struct ggml_init_params params;
|
||||
struct ggml_v2_init_params params;
|
||||
params.mem_size = ctx_size;
|
||||
params.mem_buffer = NULL;
|
||||
params.no_alloc = false;
|
||||
|
||||
model.ctx = ggml_init(params);
|
||||
model.ctx = ggml_v2_init(params);
|
||||
if (!model.ctx) {
|
||||
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
|
||||
fprintf(stderr, "%s: ggml_v2_init() failed\n", __func__);
|
||||
return ModelLoadResult::FAIL;
|
||||
}
|
||||
}
|
||||
|
@ -166,13 +166,13 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
|
|||
|
||||
model.layers.resize(n_layer);
|
||||
|
||||
model.wte = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
model.wte = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
|
||||
model.ln_f_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model.ln_f_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model.ln_f_g = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
model.ln_f_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
model.lmh_g = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
//model.lmh_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_vocab);
|
||||
model.lmh_g = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
|
||||
//model.lmh_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_vocab);
|
||||
|
||||
// map by name
|
||||
model.tensors["gpt_neox.embed_in.weight"] = model.wte;
|
||||
|
@ -186,23 +186,23 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
|
|||
for (int i = 0; i < n_layer; ++i) {
|
||||
auto & layer = model.layers[i];
|
||||
|
||||
layer.ln_1_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_1_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_1_g = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
layer.ln_1_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_attn_attn_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd);
|
||||
layer.c_attn_attn_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 3*n_embd);
|
||||
layer.c_attn_attn_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, 3*n_embd);
|
||||
layer.c_attn_attn_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, 3*n_embd);
|
||||
|
||||
layer.c_attn_proj_w = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.c_attn_proj_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.c_attn_proj_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
layer.ln_2_g = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_2_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.ln_2_g = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
layer.ln_2_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
layer.c_mlp_fc_w = ggml_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
|
||||
layer.c_mlp_fc_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 4*n_embd);
|
||||
layer.c_mlp_fc_w = ggml_v2_new_tensor_2d(ctx, wtype, n_embd, 4*n_embd);
|
||||
layer.c_mlp_fc_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, 4*n_embd);
|
||||
|
||||
layer.c_mlp_proj_w = ggml_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
|
||||
layer.c_mlp_proj_b = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
layer.c_mlp_proj_w = ggml_v2_new_tensor_2d(ctx, wtype, 4*n_embd, n_embd);
|
||||
layer.c_mlp_proj_b = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_embd);
|
||||
|
||||
// map by name
|
||||
model.tensors["gpt_neox.layers." + std::to_string(i) + ".input_layernorm.weight"] = layer.ln_1_g;
|
||||
|
@ -236,10 +236,10 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
|
|||
const int64_t n_mem = n_layer*n_ctx;
|
||||
const int64_t n_elements = n_embd*n_mem;
|
||||
|
||||
model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
|
||||
model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
|
||||
model.memory_k = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F16, n_elements);
|
||||
model.memory_v = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F16, n_elements);
|
||||
|
||||
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
|
||||
const size_t memory_size = ggml_v2_nbytes(model.memory_k) + ggml_v2_nbytes(model.memory_v);
|
||||
|
||||
printf("%s: memory_size = %8.2f MB, n_mem = %" PRId64 "\n", __func__, memory_size/1024.0/1024.0, n_mem);
|
||||
}
|
||||
|
@ -280,7 +280,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
|
|||
}
|
||||
|
||||
auto tensor = model.tensors[name.data()];
|
||||
if (ggml_nelements(tensor) != nelements) {
|
||||
if (ggml_v2_nelements(tensor) != nelements) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
|
||||
return ModelLoadResult::FAIL;
|
||||
}
|
||||
|
@ -293,20 +293,20 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
|
|||
|
||||
// for debugging
|
||||
if (0) {
|
||||
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor)/1024.0/1024.0, ggml_nbytes(tensor));
|
||||
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1], ggml_v2_type_name(ggml_v2_type(ttype)), ggml_v2_nbytes(tensor)/1024.0/1024.0, ggml_v2_nbytes(tensor));
|
||||
}
|
||||
|
||||
size_t bpe = ggml_type_size(ggml_type(ttype));
|
||||
size_t bpe = ggml_v2_type_size(ggml_v2_type(ttype));
|
||||
|
||||
if(file_format==FileFormat::NEOX_1)
|
||||
{
|
||||
switch (ttype) {
|
||||
case 0: bpe = ggml_type_size(GGML_TYPE_F32); break;
|
||||
case 1: bpe = ggml_type_size(GGML_TYPE_F16); break;
|
||||
case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
|
||||
case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
|
||||
case 5: bpe = ggml_type_size(GGML_TYPE_Q4_2); assert(ne[0] % 64 == 0); break;
|
||||
case 6: bpe = ggml_type_size(GGML_TYPE_Q4_3); assert(ne[0] % 64 == 0); break;
|
||||
case 0: bpe = ggml_v2_type_size(GGML_V2_TYPE_F32); break;
|
||||
case 1: bpe = ggml_v2_type_size(GGML_V2_TYPE_F16); break;
|
||||
case 2: bpe = ggml_v2_type_size(GGML_V2_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
|
||||
case 3: bpe = ggml_v2_type_size(GGML_V2_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
|
||||
case 5: bpe = ggml_v2_type_size(GGML_V2_TYPE_Q4_2); assert(ne[0] % 64 == 0); break;
|
||||
case 6: bpe = ggml_v2_type_size(GGML_V2_TYPE_Q4_3); assert(ne[0] % 64 == 0); break;
|
||||
default:
|
||||
{
|
||||
fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ttype);
|
||||
|
@ -315,16 +315,16 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
|
|||
};
|
||||
}
|
||||
|
||||
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
|
||||
if ((nelements*bpe)/ggml_v2_blck_size(tensor->type) != ggml_v2_nbytes(tensor)) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
|
||||
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
|
||||
ggml_free(ctx);
|
||||
__func__, name.data(), ggml_v2_nbytes(tensor), nelements*bpe);
|
||||
ggml_v2_free(ctx);
|
||||
return ModelLoadResult::RETRY_LOAD;
|
||||
}
|
||||
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_v2_nbytes(tensor));
|
||||
|
||||
total_size += ggml_nbytes(tensor);
|
||||
total_size += ggml_v2_nbytes(tensor);
|
||||
if (++n_tensors % 8 == 0) {
|
||||
printf(".");
|
||||
fflush(stdout);
|
||||
|
@ -343,37 +343,37 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
|
|||
|
||||
|
||||
// feed-forward network
|
||||
ggml_tensor * gpt_neox_ff(
|
||||
ggml_v2_tensor * gpt_neox_ff(
|
||||
const gpt_neox_layer &layer,
|
||||
ggml_context * ctx0,
|
||||
ggml_tensor * inp) {
|
||||
ggml_tensor * cur = ggml_norm(ctx0, inp);
|
||||
ggml_v2_context * ctx0,
|
||||
ggml_v2_tensor * inp) {
|
||||
ggml_v2_tensor * cur = ggml_v2_norm(ctx0, inp);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, layer.ln_2_g, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_mul(ctx0,
|
||||
ggml_v2_repeat(ctx0, layer.ln_2_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, layer.ln_2_b, cur));
|
||||
ggml_v2_repeat(ctx0, layer.ln_2_b, cur));
|
||||
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
layer.c_mlp_fc_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, layer.c_mlp_fc_b, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, layer.c_mlp_fc_b, cur),
|
||||
cur);
|
||||
|
||||
// GELU activation
|
||||
cur = ggml_gelu(ctx0, cur);
|
||||
cur = ggml_v2_gelu(ctx0, cur);
|
||||
|
||||
// projection
|
||||
// cur = proj_w*cur + proj_b
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
layer.c_mlp_proj_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, layer.c_mlp_proj_b, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, layer.c_mlp_proj_b, cur),
|
||||
cur);
|
||||
return cur;
|
||||
}
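For reference (editorial note, not part of the commit): gpt_neox_ff above is the standard GPT-NeoX MLP block, now expressed with the renamed ggml_v2 ops. With a hidden size of 4*n_embd (matching the c_mlp_fc_w / c_mlp_proj_w shapes allocated during model load) it computes:

    h   = ln_2_g * norm(inp) + ln_2_b
    out = c_mlp_proj_w * gelu(c_mlp_fc_w * h + c_mlp_fc_b) + c_mlp_proj_b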
|
||||
|
@ -420,196 +420,196 @@ bool gpt_neox_eval(
|
|||
}
|
||||
}
|
||||
|
||||
struct ggml_init_params params;
|
||||
struct ggml_v2_init_params params;
|
||||
params.mem_size = buf_size;
|
||||
params.mem_buffer = buf;
|
||||
params.no_alloc = false;
|
||||
|
||||
|
||||
struct ggml_context * ctx0 = ggml_init(params);
|
||||
struct ggml_cgraph gf = {};
|
||||
struct ggml_v2_context * ctx0 = ggml_v2_init(params);
|
||||
struct ggml_v2_cgraph gf = {};
|
||||
gf.n_threads = n_threads;
|
||||
|
||||
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
|
||||
memcpy(embd->data, embd_inp.data(), N*ggml_element_size(embd));
|
||||
struct ggml_v2_tensor * embd = ggml_v2_new_tensor_1d(ctx0, GGML_V2_TYPE_I32, N);
|
||||
memcpy(embd->data, embd_inp.data(), N*ggml_v2_element_size(embd));
|
||||
|
||||
// wte
|
||||
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte, embd);
|
||||
struct ggml_v2_tensor * inpL = ggml_v2_get_rows(ctx0, model.wte, embd);
|
||||
|
||||
for (int il = 0; il < n_layer; ++il) {
|
||||
struct ggml_tensor * cur;
|
||||
struct ggml_v2_tensor * cur;
|
||||
|
||||
// self-attention
|
||||
{
|
||||
{
|
||||
cur = ggml_norm(ctx0, inpL);
|
||||
cur = ggml_v2_norm(ctx0, inpL);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_mul(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_g, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_mul(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].ln_1_g, cur),
|
||||
cur),
|
||||
ggml_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
||||
ggml_v2_repeat(ctx0, model.layers[il].ln_1_b, cur));
|
||||
}
|
||||
|
||||
// compute QKV
|
||||
{
|
||||
cur = ggml_mul_mat(ctx0,
|
||||
cur = ggml_v2_mul_mat(ctx0,
|
||||
model.layers[il].c_attn_attn_w,
|
||||
cur);
|
||||
|
||||
cur = ggml_add(ctx0,
|
||||
ggml_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
|
||||
cur = ggml_v2_add(ctx0,
|
||||
ggml_v2_repeat(ctx0, model.layers[il].c_attn_attn_b, cur),
|
||||
cur);
|
||||
}
|
||||
|
||||
struct ggml_tensor * Qcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 0*sizeof(float)*n_embd/n_head));
|
||||
struct ggml_tensor * Kcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 1*sizeof(float)*n_embd/n_head));
|
||||
struct ggml_tensor * Vcur = ggml_cont(ctx0, ggml_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
|
||||
struct ggml_v2_tensor * Qcur = ggml_v2_cont(ctx0, ggml_v2_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 0*sizeof(float)*n_embd/n_head));
|
||||
struct ggml_v2_tensor * Kcur = ggml_v2_cont(ctx0, ggml_v2_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 1*sizeof(float)*n_embd/n_head));
|
||||
struct ggml_v2_tensor * Vcur = ggml_v2_cont(ctx0, ggml_v2_view_3d(ctx0, cur, n_embd/n_head, n_head, N, cur->nb[1]/n_head, cur->nb[1], 2*sizeof(float)*n_embd/n_head));
|
||||
|
||||
// using mode = 2 for GPT-NeoX mode
|
||||
Qcur = ggml_rope_inplace(ctx0, Qcur, n_past, n_rot, 2);
|
||||
Kcur = ggml_rope_inplace(ctx0, Kcur, n_past, n_rot, 2);
|
||||
Qcur = ggml_v2_rope_inplace(ctx0, Qcur, n_past, n_rot, 2);
|
||||
Kcur = ggml_v2_rope_inplace(ctx0, Kcur, n_past, n_rot, 2);
|
||||
|
||||
// store key and value to memory
|
||||
{
|
||||
Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd, N));
|
||||
Vcur = ggml_v2_transpose(ctx0, ggml_v2_reshape_2d(ctx0, Vcur, n_embd, N));
|
||||
|
||||
struct ggml_tensor * k = ggml_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_tensor * v = ggml_view_2d(ctx0, model.memory_v, N, n_embd,
|
||||
( n_ctx)*ggml_element_size(model.memory_v),
|
||||
(il*n_ctx)*ggml_element_size(model.memory_v)*n_embd + n_past*ggml_element_size(model.memory_v));
|
||||
struct ggml_v2_tensor * k = ggml_v2_view_1d(ctx0, model.memory_k, N*n_embd, (ggml_v2_element_size(model.memory_k)*n_embd)*(il*n_ctx + n_past));
|
||||
struct ggml_v2_tensor * v = ggml_v2_view_2d(ctx0, model.memory_v, N, n_embd,
|
||||
( n_ctx)*ggml_v2_element_size(model.memory_v),
|
||||
(il*n_ctx)*ggml_v2_element_size(model.memory_v)*n_embd + n_past*ggml_v2_element_size(model.memory_v));
|
||||
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
|
||||
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
|
||||
ggml_v2_build_forward_expand(&gf, ggml_v2_cpy(ctx0, Kcur, k));
|
||||
ggml_v2_build_forward_expand(&gf, ggml_v2_cpy(ctx0, Vcur, v));
|
||||
}
|
||||
|
||||
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * Q =
|
||||
ggml_permute(ctx0,
|
||||
struct ggml_v2_tensor * Q =
|
||||
ggml_v2_permute(ctx0,
|
||||
Qcur,
|
||||
0, 2, 1, 3);
|
||||
|
||||
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1, 3)
|
||||
struct ggml_tensor * K =
|
||||
ggml_permute(ctx0,
|
||||
ggml_reshape_3d(ctx0,
|
||||
ggml_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(model.memory_k)*n_embd),
|
||||
struct ggml_v2_tensor * K =
|
||||
ggml_v2_permute(ctx0,
|
||||
ggml_v2_reshape_3d(ctx0,
|
||||
ggml_v2_view_1d(ctx0, model.memory_k, (n_past + N)*n_embd, il*n_ctx*ggml_v2_element_size(model.memory_k)*n_embd),
|
||||
n_embd/n_head, n_head, n_past + N),
|
||||
0, 2, 1, 3);
|
||||
|
||||
 // K * Q
-struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+struct ggml_v2_tensor * KQ = ggml_v2_mul_mat(ctx0, K, Q);

 // KQ_scaled = KQ / sqrt(n_embd/n_head)
-struct ggml_tensor * KQ_scaled =
-ggml_scale_inplace(ctx0,
+struct ggml_v2_tensor * KQ_scaled =
+ggml_v2_scale_inplace(ctx0,
 KQ,
-ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
+ggml_v2_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
 );

 // KQ_masked = mask_past(KQ_scaled)
-struct ggml_tensor * KQ_masked = ggml_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);
+struct ggml_v2_tensor * KQ_masked = ggml_v2_diag_mask_inf_inplace(ctx0, KQ_scaled, n_past);

 // KQ = soft_max(KQ_masked)
-struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
+struct ggml_v2_tensor * KQ_soft_max = ggml_v2_soft_max_inplace(ctx0, KQ_masked);

 // V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1, 2, 0, 3).contiguous()
-struct ggml_tensor * V =
-ggml_view_3d(ctx0, model.memory_v,
+struct ggml_v2_tensor * V =
+ggml_v2_view_3d(ctx0, model.memory_v,
 n_past + N, n_embd/n_head, n_head,
-n_ctx*ggml_element_size(model.memory_v),
-n_ctx*ggml_element_size(model.memory_v)*n_embd/n_head,
-il*n_ctx*ggml_element_size(model.memory_v)*n_embd);
+n_ctx*ggml_v2_element_size(model.memory_v),
+n_ctx*ggml_v2_element_size(model.memory_v)*n_embd/n_head,
+il*n_ctx*ggml_v2_element_size(model.memory_v)*n_embd);

 // KQV = transpose(V) * KQ_soft_max
-struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
+struct ggml_v2_tensor * KQV = ggml_v2_mul_mat(ctx0, V, KQ_soft_max);

 // KQV_merged = KQV.permute(0, 2, 1, 3)
-struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+struct ggml_v2_tensor * KQV_merged = ggml_v2_permute(ctx0, KQV, 0, 2, 1, 3);

 // cur = KQV_merged.contiguous().view(n_embd, N)
-cur = ggml_cpy(ctx0,
+cur = ggml_v2_cpy(ctx0,
 KQV_merged,
-ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
+ggml_v2_new_tensor_2d(ctx0, GGML_V2_TYPE_F32, n_embd, N));

 // projection
 {
-cur = ggml_mul_mat(ctx0,
+cur = ggml_v2_mul_mat(ctx0,
 model.layers[il].c_attn_proj_w,
 cur);

-cur = ggml_add(ctx0, ggml_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), cur);
+cur = ggml_v2_add(ctx0, ggml_v2_repeat(ctx0, model.layers[il].c_attn_proj_b, cur), cur);
 }
 }

 if (hparams.par_res == 0) {
-struct ggml_tensor * inpFF = ggml_add(ctx0, cur, inpL);
+struct ggml_v2_tensor * inpFF = ggml_v2_add(ctx0, cur, inpL);

 cur = gpt_neox_ff(model.layers[il], ctx0, inpFF);

 // input for next layer
-inpL = ggml_add(ctx0, cur, inpFF);
+inpL = ggml_v2_add(ctx0, cur, inpFF);
 } else {
-struct ggml_tensor * inpFF = cur;
+struct ggml_v2_tensor * inpFF = cur;

 // this is independent of the self-attention result, so it could be done in parallel to the self-attention
 // note here we pass inpL instead of cur
 cur = gpt_neox_ff(model.layers[il], ctx0, inpL);

 // layer input + FF
-cur = ggml_add(ctx0, cur, inpFF);
+cur = ggml_v2_add(ctx0, cur, inpFF);

 // input for next layer
-inpL = ggml_add(ctx0, cur, inpL);
+inpL = ggml_v2_add(ctx0, cur, inpL);
 }
 }

 // norm
 {
-inpL = ggml_norm(ctx0, inpL);
+inpL = ggml_v2_norm(ctx0, inpL);

 // inpL = ln_f_g*inpL + ln_f_b
-inpL = ggml_add(ctx0,
-ggml_mul(ctx0,
-ggml_repeat(ctx0, model.ln_f_g, inpL),
+inpL = ggml_v2_add(ctx0,
+ggml_v2_mul(ctx0,
+ggml_v2_repeat(ctx0, model.ln_f_g, inpL),
 inpL),
-ggml_repeat(ctx0, model.ln_f_b, inpL));
+ggml_v2_repeat(ctx0, model.ln_f_b, inpL));
 }

 // lm_head
 {
-inpL = ggml_mul_mat(ctx0, model.lmh_g, inpL);
+inpL = ggml_v2_mul_mat(ctx0, model.lmh_g, inpL);

-//inpL = ggml_add(ctx0,
-// ggml_repeat(ctx0, model.lmh_b, inpL),
+//inpL = ggml_v2_add(ctx0,
+// ggml_v2_repeat(ctx0, model.lmh_b, inpL),
 // inpL);
 }

 // logits -> probs
-//inpL = ggml_soft_max_inplace(ctx0, inpL);
+//inpL = ggml_v2_soft_max_inplace(ctx0, inpL);

 // run the computation
-ggml_build_forward_expand(&gf, inpL);
-ggml_graph_compute (ctx0, &gf);
+ggml_v2_build_forward_expand(&gf, inpL);
+ggml_v2_graph_compute (ctx0, &gf);

 //if (n_past%100 == 0) {
-// ggml_graph_print (&gf);
-// ggml_graph_dump_dot(&gf, NULL, "gpt-2.dot");
+// ggml_v2_graph_print (&gf);
+// ggml_v2_graph_dump_dot(&gf, NULL, "gpt-2.dot");
 //}

 //embd_w.resize(n_vocab*N);
-//memcpy(embd_w.data(), ggml_get_data(inpL), sizeof(float)*n_vocab*N);
+//memcpy(embd_w.data(), ggml_v2_get_data(inpL), sizeof(float)*n_vocab*N);

 // return result for just the last token
 embd_w.resize(n_vocab);
-memcpy(embd_w.data(), (float *) ggml_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);
+memcpy(embd_w.data(), (float *) ggml_v2_get_data(inpL) + (n_vocab*(N-1)), sizeof(float)*n_vocab);

 if (mem_per_token == 0) {
-mem_per_token = ggml_used_mem(ctx0)/N;
+mem_per_token = ggml_v2_used_mem(ctx0)/N;
 }
-//printf("used_mem = %zu\n", ggml_used_mem(ctx0));
+//printf("used_mem = %zu\n", ggml_v2_used_mem(ctx0));

-ggml_free(ctx0);
+ggml_v2_free(ctx0);

 return true;
 }
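For reference, the attention block above is plain scaled dot-product attention over the cached keys and values: KQ = K·Q is scaled by 1/sqrt(n_embd/n_head), causally masked via n_past, soft-maxed, and applied to V; hparams.par_res then selects between the parallel-residual and the sequential GPT-NeoX residual forms. A minimal per-head scalar sketch of the same computation follows; names and buffer layout are assumptions for illustration only, not code from this commit.

#include <algorithm>
#include <cmath>
#include <vector>

// Attention for one query vector over n_kv cached key/value rows (one head).
// The graph above does the same thing batched over heads and query positions.
static void attend_one_query(const float * q, const float * k, const float * v,
                             float * out, int n_kv, int d_head) {
    std::vector<float> w(n_kv);
    const float scale = 1.0f / sqrtf((float) d_head);   // KQ_scaled
    float wmax = -INFINITY;
    for (int t = 0; t < n_kv; ++t) {
        float dot = 0.0f;
        for (int c = 0; c < d_head; ++c) dot += k[t * d_head + c] * q[c];
        w[t] = dot * scale;
        wmax = std::max(wmax, w[t]);
    }
    float sum = 0.0f;
    for (int t = 0; t < n_kv; ++t) { w[t] = expf(w[t] - wmax); sum += w[t]; }   // soft_max
    for (int c = 0; c < d_head; ++c) {
        float acc = 0.0f;
        for (int t = 0; t < n_kv; ++t) acc += (w[t] / sum) * v[t * d_head + c]; // KQV
        out[c] = acc;
    }
}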
@@ -46,6 +46,26 @@ struct gptj_layer {
 struct ggml_tensor * c_mlp_proj_w_trans; //for backwards compatibility
 struct ggml_tensor * c_mlp_proj_b;
 };
+struct gptj_layer_v2 {
+// normalization
+struct ggml_v2_tensor * ln_1_g;
+struct ggml_v2_tensor * ln_1_b;
+
+// attention
+struct ggml_v2_tensor * c_attn_q_proj_w;
+struct ggml_v2_tensor * c_attn_k_proj_w;
+struct ggml_v2_tensor * c_attn_v_proj_w;
+
+struct ggml_v2_tensor * c_attn_proj_w;
+
+// ff
+struct ggml_v2_tensor * c_mlp_fc_w;
+struct ggml_v2_tensor * c_mlp_fc_b;
+
+struct ggml_v2_tensor * c_mlp_proj_w;
+struct ggml_v2_tensor * c_mlp_proj_w_trans; //for backwards compatibility
+struct ggml_v2_tensor * c_mlp_proj_b;
+};
 struct gptj_layer_v1 {
 // normalization
 struct ggml_v1_tensor * ln_1_g;

@@ -90,6 +110,29 @@ struct gptj_model_v1 {
 std::map<std::string, struct ggml_v1_tensor *> tensors;
 };

+struct gptj_model_v2 {
+gptj_hparams hparams;
+
+// normalization
+struct ggml_v2_tensor * ln_f_g;
+struct ggml_v2_tensor * ln_f_b;
+
+struct ggml_v2_tensor * wte; // position embedding
+
+struct ggml_v2_tensor * lmh_g; // language model head
+struct ggml_v2_tensor * lmh_b; // language model bias
+
+std::vector<gptj_layer> layers;
+
+// key + value memory
+struct ggml_v2_tensor * memory_k;
+struct ggml_v2_tensor * memory_v;
+
+//
+struct ggml_v2_context * ctx;
+std::map<std::string, struct ggml_v2_tensor *> tensors;
+};
+
 struct gptj_model {
 gptj_hparams hparams;

@@ -167,6 +210,50 @@ struct gpt2_v1_model {
 std::map<std::string, struct ggml_v1_tensor *> tensors;
 };

+struct gpt2_layer_v2 {
+// normalization
+struct ggml_v2_tensor * ln_1_g;
+struct ggml_v2_tensor * ln_1_b;
+
+struct ggml_v2_tensor * ln_2_g;
+struct ggml_v2_tensor * ln_2_b;
+
+// attention
+struct ggml_v2_tensor * c_attn_attn_w;
+struct ggml_v2_tensor * c_attn_attn_b;
+
+struct ggml_v2_tensor * c_attn_proj_w;
+struct ggml_v2_tensor * c_attn_proj_b;
+
+// mlp
+struct ggml_v2_tensor * c_mlp_fc_w;
+struct ggml_v2_tensor * c_mlp_fc_b;
+
+struct ggml_v2_tensor * c_mlp_proj_w;
+struct ggml_v2_tensor * c_mlp_proj_b;
+};
+
+struct gpt2_v2_model {
+gpt2_hparams hparams;
+
+// normalization
+struct ggml_v2_tensor * ln_f_g;
+struct ggml_v2_tensor * ln_f_b;
+
+struct ggml_v2_tensor * wte; // position embedding
+struct ggml_v2_tensor * wpe; // token embedding
+struct ggml_v2_tensor * lm_head; // language model head
+
+std::vector<gpt2_layer_v2> layers;
+
+// key + value memory
+struct ggml_v2_tensor * memory_k;
+struct ggml_v2_tensor * memory_v;
+
+//
+struct ggml_v2_context * ctx;
+std::map<std::string, struct ggml_v2_tensor *> tensors;
+};

 struct gpt2_layer {
 // normalization

@@ -225,6 +312,53 @@ struct gpt_neox_hparams {
 int32_t ftype = 1;
 };

+struct gpt_neox_layer_v2 {
+// pre normalization
+struct ggml_v2_tensor * ln_1_g;
+struct ggml_v2_tensor * ln_1_b;
+
+// attention
+struct ggml_v2_tensor * c_attn_attn_w;
+struct ggml_v2_tensor * c_attn_attn_b;
+
+struct ggml_v2_tensor * c_attn_proj_w;
+struct ggml_v2_tensor * c_attn_proj_b;
+
+// post normalization
+struct ggml_v2_tensor * ln_2_g;
+struct ggml_v2_tensor * ln_2_b;
+
+// ff
+struct ggml_v2_tensor * c_mlp_fc_w;
+struct ggml_v2_tensor * c_mlp_fc_b;
+
+struct ggml_v2_tensor * c_mlp_proj_w;
+struct ggml_v2_tensor * c_mlp_proj_b;
+};
+
+struct gpt_neox_v2_model {
+gpt_neox_hparams hparams;
+
+// normalization
+struct ggml_v2_tensor * ln_f_g;
+struct ggml_v2_tensor * ln_f_b;
+
+struct ggml_v2_tensor * wte; // position embedding
+
+struct ggml_v2_tensor * lmh_g; // language model head
+//struct ggml_tensor * lmh_b; // language model bias
+
+std::vector<gpt_neox_layer_v2> layers;
+
+// key + value memory
+struct ggml_v2_tensor * memory_k;
+struct ggml_v2_tensor * memory_v;
+
+//
+struct ggml_v2_context * ctx;
+std::map<std::string, struct ggml_v2_tensor *> tensors;
+};
+
 struct gpt_neox_layer {
 // pre normalization
 struct ggml_tensor * ln_1_g;
@@ -4,7 +4,7 @@
 #include "otherarch.h"

 #include "rwkv_v2.h"
-#include "ggml.h"
+#include "ggml_v2.h"

 #include <string>
 #include <vector>

@@ -48,21 +48,21 @@ bool read_int32(FILE * file, int32_t * dest) {
 return true;
 }

-#define GGML_TYPE_UNKNOWN GGML_TYPE_COUNT
+#define GGML_V2_TYPE_UNKNOWN GGML_V2_TYPE_COUNT

 #define FORMAT_TYPE_COUNT 10

-static const ggml_type FORMAT_TYPE_TO_GGML_TYPE[FORMAT_TYPE_COUNT] = {
-GGML_TYPE_F32,
-GGML_TYPE_F16,
-GGML_TYPE_Q4_0,
-GGML_TYPE_Q4_1,
-GGML_TYPE_UNKNOWN, // Unused
-GGML_TYPE_Q4_2,
-GGML_TYPE_UNKNOWN, // Unused
-GGML_TYPE_Q5_0,
-GGML_TYPE_Q5_1,
-GGML_TYPE_Q8_0
+static const ggml_v2_type FORMAT_TYPE_TO_GGML_V2_TYPE[FORMAT_TYPE_COUNT] = {
+GGML_V2_TYPE_F32,
+GGML_V2_TYPE_F16,
+GGML_V2_TYPE_Q4_0,
+GGML_V2_TYPE_Q4_1,
+GGML_V2_TYPE_UNKNOWN, // Unused
+GGML_V2_TYPE_Q4_2,
+GGML_V2_TYPE_UNKNOWN, // Unused
+GGML_V2_TYPE_Q5_0,
+GGML_V2_TYPE_Q5_1,
+GGML_V2_TYPE_Q8_0
 };

 static int32_t format_name_to_format_type(const char * format_name) {
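The table above maps on-disk format ids to ggml_v2 types, with GGML_V2_TYPE_UNKNOWN marking slots that are unused or no longer supported. A small sketch of the lookup-and-validate pattern the loaders below rely on; the wrapper name is hypothetical and it assumes ggml_v2.h and the table are in scope.

#include <cstdint>
#include <cstdio>

// Hypothetical wrapper around the table above: resolve a format id or fail loudly.
static bool resolve_format_type(int32_t format_id, ggml_v2_type * out) {
    if (format_id < 0 || format_id >= FORMAT_TYPE_COUNT) {
        fprintf(stderr, "format id %d out of range\n", format_id);
        return false;
    }
    const ggml_v2_type t = FORMAT_TYPE_TO_GGML_V2_TYPE[format_id];
    if (t == GGML_V2_TYPE_UNKNOWN) {    // "Unused" slots in the table
        fprintf(stderr, "format id %d is unsupported\n", format_id);
        return false;
    }
    *out = t;
    return true;
}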
@@ -79,29 +79,29 @@ static int32_t format_name_to_format_type(const char * format_name) {
 // --- Model definition and loading utilities ---

 struct rwkv_layer {
-struct ggml_tensor * ln1_weight;
-struct ggml_tensor * ln1_bias;
+struct ggml_v2_tensor * ln1_weight;
+struct ggml_v2_tensor * ln1_bias;

 // RWKV, also called "attention" by the author.
-struct ggml_tensor * att_time_mix_k;
-struct ggml_tensor * att_time_mix_v;
-struct ggml_tensor * att_time_mix_r;
-struct ggml_tensor * att_time_first;
-struct ggml_tensor * att_time_decay;
-struct ggml_tensor * att_key;
-struct ggml_tensor * att_value;
-struct ggml_tensor * att_receptance;
-struct ggml_tensor * att_output;
+struct ggml_v2_tensor * att_time_mix_k;
+struct ggml_v2_tensor * att_time_mix_v;
+struct ggml_v2_tensor * att_time_mix_r;
+struct ggml_v2_tensor * att_time_first;
+struct ggml_v2_tensor * att_time_decay;
+struct ggml_v2_tensor * att_key;
+struct ggml_v2_tensor * att_value;
+struct ggml_v2_tensor * att_receptance;
+struct ggml_v2_tensor * att_output;

-struct ggml_tensor * ln2_weight;
-struct ggml_tensor * ln2_bias;
+struct ggml_v2_tensor * ln2_weight;
+struct ggml_v2_tensor * ln2_bias;

 // FFN.
-struct ggml_tensor * ffn_time_mix_k;
-struct ggml_tensor * ffn_time_mix_r;
-struct ggml_tensor * ffn_key;
-struct ggml_tensor * ffn_value;
-struct ggml_tensor * ffn_receptance;
+struct ggml_v2_tensor * ffn_time_mix_k;
+struct ggml_v2_tensor * ffn_time_mix_r;
+struct ggml_v2_tensor * ffn_key;
+struct ggml_v2_tensor * ffn_value;
+struct ggml_v2_tensor * ffn_receptance;
 };

 struct rwkv_model {

@@ -111,23 +111,23 @@ struct rwkv_model {
 // 0 for float32, 1 for float16.
 int32_t data_type;

-struct ggml_tensor * emb;
+struct ggml_v2_tensor * emb;

-struct ggml_tensor * ln0_weight;
-struct ggml_tensor * ln0_bias;
+struct ggml_v2_tensor * ln0_weight;
+struct ggml_v2_tensor * ln0_bias;

 std::vector<rwkv_layer> layers;

-struct ggml_tensor * ln_out_weight;
-struct ggml_tensor * ln_out_bias;
+struct ggml_v2_tensor * ln_out_weight;
+struct ggml_v2_tensor * ln_out_bias;

-struct ggml_tensor * head;
+struct ggml_v2_tensor * head;
 };

 // Finds model parameter by key and sets it into dest.
 // If the parameter was not found, returns false.
-bool set_parameter(std::unordered_map<std::string, struct ggml_tensor *> * parameters, char * key, struct ggml_tensor ** dest) {
-struct ggml_tensor * parameter = (*parameters)[key];
+bool set_parameter(std::unordered_map<std::string, struct ggml_v2_tensor *> * parameters, char * key, struct ggml_v2_tensor ** dest) {
+struct ggml_v2_tensor * parameter = (*parameters)[key];
 RWKV_ASSERT_FALSE(parameter != NULL, "Parameter %s not found in model file", key);
 *dest = parameter;
 return true;

@@ -135,7 +135,7 @@ bool set_parameter(std::unordered_map<std::string, struct ggml_tensor *> * param

 // Finds block parameter by block index and key and sets it into dest.
 // If the parameter was not found, returns false.
-bool set_block_parameter(std::unordered_map<std::string, struct ggml_tensor *> * parameters, int32_t block_index, char * key, struct ggml_tensor ** dest) {
+bool set_block_parameter(std::unordered_map<std::string, struct ggml_v2_tensor *> * parameters, int32_t block_index, char * key, struct ggml_v2_tensor ** dest) {
 char full_key[128];
 sprintf(full_key, "blocks.%d.%s", block_index, key);
 return set_parameter(parameters, full_key, dest);

@@ -167,28 +167,28 @@ void rwkv_max_impl(const int n_cols, float * dest, const float * src0, const flo
 }
 }

-struct ggml_tensor * rwkv_exp(ggml_context * ctx, struct ggml_tensor * x) {
-return ggml_map_unary_f32(ctx, x, rwkv_exp_impl);
+struct ggml_v2_tensor * rwkv_exp(ggml_v2_context * ctx, struct ggml_v2_tensor * x) {
+return ggml_v2_map_unary_f32(ctx, x, rwkv_exp_impl);
 }

-struct ggml_tensor * rwkv_1_minus_x(ggml_context * ctx, struct ggml_tensor * x) {
-return ggml_map_unary_f32(ctx, x, rwkv_1_minus_x_impl);
+struct ggml_v2_tensor * rwkv_1_minus_x(ggml_v2_context * ctx, struct ggml_v2_tensor * x) {
+return ggml_v2_map_unary_f32(ctx, x, rwkv_1_minus_x_impl);
 }

-struct ggml_tensor * rwkv_sigmoid(ggml_context * ctx, struct ggml_tensor * x) {
-return ggml_map_unary_f32(ctx, x, rwkv_sigmoid_impl);
+struct ggml_v2_tensor * rwkv_sigmoid(ggml_v2_context * ctx, struct ggml_v2_tensor * x) {
+return ggml_v2_map_unary_f32(ctx, x, rwkv_sigmoid_impl);
 }

-struct ggml_tensor * rwkv_max(ggml_context * ctx, struct ggml_tensor * x, struct ggml_tensor * y) {
-return ggml_map_binary_f32(ctx, x, y, rwkv_max_impl);
+struct ggml_v2_tensor * rwkv_max(ggml_v2_context * ctx, struct ggml_v2_tensor * x, struct ggml_v2_tensor * y) {
+return ggml_v2_map_binary_f32(ctx, x, y, rwkv_max_impl);
 }

-struct ggml_tensor * rwkv_layer_norm(ggml_context * ctx, struct ggml_tensor * x, struct ggml_tensor * weight, struct ggml_tensor * bias) {
+struct ggml_v2_tensor * rwkv_layer_norm(ggml_v2_context * ctx, struct ggml_v2_tensor * x, struct ggml_v2_tensor * weight, struct ggml_v2_tensor * bias) {
 // LayerNorm in RWKV is `x = (x - mean(x)) / sqrt(variance(x) + 1e-5) * weight + bias`
-// Looks like ggml_norm does the first part, we only need to apply weight & bias.
-x = ggml_norm(ctx, x);
-x = ggml_mul(ctx, x, weight);
-x = ggml_add(ctx, x, bias);
+// Looks like ggml_v2_norm does the first part, we only need to apply weight & bias.
+x = ggml_v2_norm(ctx, x);
+x = ggml_v2_mul(ctx, x, weight);
+x = ggml_v2_add(ctx, x, bias);
 return x;
 }
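The comment in rwkv_layer_norm already spells out the math; a scalar reference for a single row of n values, matching that formula, may still be useful. This is illustrative only; the real path stays on ggml_v2_norm, ggml_v2_mul and ggml_v2_add.

#include <cmath>

// Scalar LayerNorm reference: x = (x - mean(x)) / sqrt(variance(x) + 1e-5) * weight + bias
static void layer_norm_ref(float * x, const float * weight, const float * bias, int n) {
    float mean = 0.0f;
    for (int i = 0; i < n; ++i) mean += x[i];
    mean /= (float) n;
    float var = 0.0f;
    for (int i = 0; i < n; ++i) { const float d = x[i] - mean; var += d * d; }
    var /= (float) n;
    const float inv_std = 1.0f / sqrtf(var + 1e-5f);
    for (int i = 0; i < n; ++i) {
        x[i] = (x[i] - mean) * inv_std * weight[i] + bias[i];
    }
}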
@@ -196,12 +196,12 @@ struct ggml_tensor * rwkv_layer_norm(ggml_context * ctx, struct ggml_tensor * x,

 struct rwkv_context {
 struct rwkv_model * model;
-struct ggml_tensor * token_index;
-struct ggml_tensor * state;
-struct ggml_tensor ** state_parts;
-struct ggml_tensor * logits;
-struct ggml_context * ctx;
-struct ggml_cgraph * graph;
+struct ggml_v2_tensor * token_index;
+struct ggml_v2_tensor * state;
+struct ggml_v2_tensor ** state_parts;
+struct ggml_v2_tensor * logits;
+struct ggml_v2_context * ctx;
+struct ggml_v2_cgraph * graph;
 bool freed;
 float * state_in = 0; //stores input state, or use null for a new state
 float * state_out = 0; //stores address of output state buffer

@@ -267,13 +267,13 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
 size_t(256) * 1024 * 1024;

 // Initialize ggml
-struct ggml_init_params params;
+struct ggml_v2_init_params params;
 params.mem_size = memory_required;
 params.mem_buffer = NULL;
 params.no_alloc = false;
-struct ggml_context * ctx = ggml_init(params);
+struct ggml_v2_context * ctx = ggml_v2_init(params);

-std::unordered_map<std::string, struct ggml_tensor *> parameters;
+std::unordered_map<std::string, struct ggml_v2_tensor *> parameters;

 while (true) {
 int32_t dim_count;

@@ -294,22 +294,22 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
 read_int32(file, &data_type);
 RWKV_ASSERT_NULL(data_type >= 0 && data_type < FORMAT_TYPE_COUNT, "Unsupported parameter data type %d", data_type);

-ggml_type ggml_data_type = FORMAT_TYPE_TO_GGML_TYPE[data_type];
+ggml_v2_type ggml_v2_data_type = FORMAT_TYPE_TO_GGML_V2_TYPE[data_type];

-RWKV_ASSERT_NULL(ggml_data_type != GGML_TYPE_UNKNOWN, "Unsupported parameter data type %d", data_type);
+RWKV_ASSERT_NULL(ggml_v2_data_type != GGML_V2_TYPE_UNKNOWN, "Unsupported parameter data type %d", data_type);

-struct ggml_tensor * tensor;
+struct ggml_v2_tensor * tensor;

 int32_t x = -1;
 int32_t y = -1;

 if (dim_count == 1) {
 read_int32(file, &x);
-tensor = ggml_new_tensor_1d(ctx, ggml_data_type, x);
+tensor = ggml_v2_new_tensor_1d(ctx, ggml_v2_data_type, x);
 } else if (dim_count == 2) {
 read_int32(file, &x);
 read_int32(file, &y);
-tensor = ggml_new_tensor_2d(ctx, ggml_data_type, x, y);
+tensor = ggml_v2_new_tensor_2d(ctx, ggml_v2_data_type, x, y);
 } else {
 abort();
 }

@@ -317,7 +317,7 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
 std::string key(key_length, 0);
 RWKV_ASSERT_NULL(fread(&key[0], 1, key_length, file) == uint32_t(key_length), "Failed to read parameter key");

-RWKV_ASSERT_NULL(fread(tensor->data, 1, ggml_nbytes(tensor), file) == ggml_nbytes(tensor), "Failed to read parameter data");
+RWKV_ASSERT_NULL(fread(tensor->data, 1, ggml_v2_nbytes(tensor), file) == ggml_v2_nbytes(tensor), "Failed to read parameter data");

 parameters[key] = tensor;
 }

@@ -365,7 +365,7 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
 set_parameter(&parameters, "head.weight", &(model->head));

 // Verify order of dimensions
-struct ggml_tensor * emb = model->emb;
+struct ggml_v2_tensor * emb = model->emb;
 RWKV_ASSERT_NULL(emb->n_dims == 2, "Unexpected dimension count of embedding matrix %d", emb->n_dims);
 RWKV_ASSERT_NULL(emb->ne[0] == model->n_embed, "Unexpected dimension of embedding matrix %lld", emb->ne[0]);
 RWKV_ASSERT_NULL(emb->ne[1] == model->n_vocab, "Unexpected dimension of embedding matrix %lld", emb->ne[1]);

@@ -374,17 +374,17 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
 int32_t n_layer = model->n_layer;

 // Build graph
-struct ggml_tensor * state = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_layer * 5 * n_embed);
+struct ggml_v2_tensor * state = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_F32, n_layer * 5 * n_embed);

 // x = self.w.emb.weight[token]
-struct ggml_tensor * token_index = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
-struct ggml_tensor * x = ggml_get_rows(ctx, model->emb, token_index);
+struct ggml_v2_tensor * token_index = ggml_v2_new_tensor_1d(ctx, GGML_V2_TYPE_I32, 1);
+struct ggml_v2_tensor * x = ggml_v2_get_rows(ctx, model->emb, token_index);

 // x = self.layer_norm(x, self.w.blocks[0].ln0)
 x = rwkv_layer_norm(ctx, x, model->ln0_weight, model->ln0_bias);

 // We collect parts of new state here. Each part is (n_embed) vector.
-struct ggml_tensor ** state_parts = new ggml_tensor * [n_layer * 5];
+struct ggml_v2_tensor ** state_parts = new ggml_v2_tensor * [n_layer * 5];

 for (int i = 0; i < n_layer; i++) {
 auto layer = model->layers[i];
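The state built above is one flat GGML_V2_TYPE_F32 tensor of n_layer * 5 * n_embed floats. Per layer, slot 0 holds the previous channel-mixing input, slot 1 the previous time-mixing input, and slots 2 to 4 the aa/bb/pp accumulators of the WKV recurrence (see the loops below). A tiny indexing helper makes that layout explicit; it is hypothetical and for illustration only.

// Hypothetical helper for the flat RWKV state layout used above:
// slot 0 = channel-mixing x_prev, 1 = time-mixing x_prev, 2 = aa, 3 = bb, 4 = pp.
static inline float * rwkv_state_slot(float * state, int layer, int slot, int n_embed) {
    return state + (5 * layer + slot) * n_embed;
}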
@@ -392,99 +392,99 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
 // RWKV/time mixing
 {
 // self.layer_norm(x, self.w.blocks[i].ln1)
-struct ggml_tensor * x0 = rwkv_layer_norm(ctx, x, layer.ln1_weight, layer.ln1_bias);
+struct ggml_v2_tensor * x0 = rwkv_layer_norm(ctx, x, layer.ln1_weight, layer.ln1_bias);
 // state[5 * i + 1]
-struct ggml_tensor * x_prev = ggml_view_1d(ctx, state, n_embed, (5 * i + 1) * n_embed * sizeof(float));
+struct ggml_v2_tensor * x_prev = ggml_v2_view_1d(ctx, state, n_embed, (5 * i + 1) * n_embed * sizeof(float));
 // xk = x * time_mix_k + state[5 * i + 1] * (1 - time_mix_k)
 // xv = x * time_mix_v + state[5 * i + 1] * (1 - time_mix_v)
 // xr = x * time_mix_r + state[5 * i + 1] * (1 - time_mix_r)
-struct ggml_tensor * xk = ggml_add(
+struct ggml_v2_tensor * xk = ggml_v2_add(
 ctx,
-ggml_mul(ctx, x0, layer.att_time_mix_k),
-ggml_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.att_time_mix_k))
+ggml_v2_mul(ctx, x0, layer.att_time_mix_k),
+ggml_v2_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.att_time_mix_k))
 );
-struct ggml_tensor * xv = ggml_add(
+struct ggml_v2_tensor * xv = ggml_v2_add(
 ctx,
-ggml_mul(ctx, x0, layer.att_time_mix_v),
-ggml_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.att_time_mix_v))
+ggml_v2_mul(ctx, x0, layer.att_time_mix_v),
+ggml_v2_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.att_time_mix_v))
 );
-struct ggml_tensor * xr = ggml_add(
+struct ggml_v2_tensor * xr = ggml_v2_add(
 ctx,
-ggml_mul(ctx, x0, layer.att_time_mix_r),
-ggml_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.att_time_mix_r))
+ggml_v2_mul(ctx, x0, layer.att_time_mix_r),
+ggml_v2_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.att_time_mix_r))
 );
 // state[5 * i + 1] = x
 state_parts[5 * i + 1] = x0;

 // r = torch.sigmoid(rw @ xr)
-struct ggml_tensor * r = rwkv_sigmoid(
+struct ggml_v2_tensor * r = rwkv_sigmoid(
 ctx,
-ggml_mul_mat(ctx, layer.att_receptance, xr)
+ggml_v2_mul_mat(ctx, layer.att_receptance, xr)
 );
 // k = kw @ xk
-struct ggml_tensor * k = ggml_mul_mat(ctx, layer.att_key, xk);
+struct ggml_v2_tensor * k = ggml_v2_mul_mat(ctx, layer.att_key, xk);
 // v = vw @ xv
-struct ggml_tensor * v = ggml_mul_mat(ctx, layer.att_value, xv);
+struct ggml_v2_tensor * v = ggml_v2_mul_mat(ctx, layer.att_value, xv);

 // aa = state[5 * i + 2]
 // bb = state[5 * i + 3]
 // pp = state[5 * i + 4]
-struct ggml_tensor * aa = ggml_view_1d(ctx, state, n_embed, (5 * i + 2) * n_embed * sizeof(float));
-struct ggml_tensor * bb = ggml_view_1d(ctx, state, n_embed, (5 * i + 3) * n_embed * sizeof(float));
-struct ggml_tensor * pp = ggml_view_1d(ctx, state, n_embed, (5 * i + 4) * n_embed * sizeof(float));
+struct ggml_v2_tensor * aa = ggml_v2_view_1d(ctx, state, n_embed, (5 * i + 2) * n_embed * sizeof(float));
+struct ggml_v2_tensor * bb = ggml_v2_view_1d(ctx, state, n_embed, (5 * i + 3) * n_embed * sizeof(float));
+struct ggml_v2_tensor * pp = ggml_v2_view_1d(ctx, state, n_embed, (5 * i + 4) * n_embed * sizeof(float));

 // ww = time_first + k
-struct ggml_tensor * ww = ggml_add(ctx, layer.att_time_first, k);
+struct ggml_v2_tensor * ww = ggml_v2_add(ctx, layer.att_time_first, k);
 // qq = torch.maximum(pp, ww)
-struct ggml_tensor * qq = rwkv_max(ctx, pp, ww);
+struct ggml_v2_tensor * qq = rwkv_max(ctx, pp, ww);
 // e1 = torch.exp(pp - qq)
-struct ggml_tensor * e1 = rwkv_exp(ctx, ggml_sub(ctx, pp, qq));
+struct ggml_v2_tensor * e1 = rwkv_exp(ctx, ggml_v2_sub(ctx, pp, qq));
 // e2 = torch.exp(ww - qq)
-struct ggml_tensor * e2 = rwkv_exp(ctx, ggml_sub(ctx, ww, qq));
+struct ggml_v2_tensor * e2 = rwkv_exp(ctx, ggml_v2_sub(ctx, ww, qq));
 // a = e1 * aa + e2 * v
-struct ggml_tensor * a = ggml_add(
+struct ggml_v2_tensor * a = ggml_v2_add(
 ctx,
-ggml_mul(ctx, e1, aa),
-ggml_mul(ctx, e2, v)
+ggml_v2_mul(ctx, e1, aa),
+ggml_v2_mul(ctx, e2, v)
 );
 // b = e1 * bb + e2
-struct ggml_tensor * b = ggml_add(
+struct ggml_v2_tensor * b = ggml_v2_add(
 ctx,
-ggml_mul(ctx, e1, bb),
+ggml_v2_mul(ctx, e1, bb),
 e2
 );
 // wkv = a / b
-struct ggml_tensor * wkv = ggml_div(ctx, a, b);
+struct ggml_v2_tensor * wkv = ggml_v2_div(ctx, a, b);
 // ww = pp + time_decay
-ww = ggml_add(ctx, pp, layer.att_time_decay);
+ww = ggml_v2_add(ctx, pp, layer.att_time_decay);
 // qq = torch.maximum(ww, k)
 qq = rwkv_max(ctx, ww, k);
 // e1 = torch.exp(ww - qq)
-e1 = rwkv_exp(ctx, ggml_sub(ctx, ww, qq));
+e1 = rwkv_exp(ctx, ggml_v2_sub(ctx, ww, qq));
 // e2 = torch.exp(k - qq)
-e2 = rwkv_exp(ctx, ggml_sub(ctx, k, qq));
+e2 = rwkv_exp(ctx, ggml_v2_sub(ctx, k, qq));
 // state[5 * i + 2] = e1 * aa + e2 * v
-state_parts[5 * i + 2] = ggml_add(
+state_parts[5 * i + 2] = ggml_v2_add(
 ctx,
-ggml_mul(ctx, e1, aa),
-ggml_mul(ctx, e2, v)
+ggml_v2_mul(ctx, e1, aa),
+ggml_v2_mul(ctx, e2, v)
 );
 // state[5 * i + 3] = e1 * bb + e2
-state_parts[5 * i + 3] = ggml_add(
+state_parts[5 * i + 3] = ggml_v2_add(
 ctx,
-ggml_mul(ctx, e1, bb),
+ggml_v2_mul(ctx, e1, bb),
 e2
 );
 // state[5 * i + 4] = qq
 state_parts[5 * i + 4] = qq;
 // ow @ (r * wkv)
-x = ggml_add(
+x = ggml_v2_add(
 ctx,
 x,
-ggml_mul_mat(
+ggml_v2_mul_mat(
 ctx,
 layer.att_output,
-ggml_mul(ctx, r, wkv)
+ggml_v2_mul(ctx, r, wkv)
 )
 );
 }
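The graph above encodes the numerically stable WKV update: rather than accumulating exp(k) directly, it keeps a running maximum exponent pp and rescales the aa/bb accumulators whenever a larger exponent qq appears. A scalar sketch of one element-wise step, mirroring the ops above (illustrative only, not code from this commit):

#include <algorithm>
#include <cmath>

// One element-wise WKV step per channel; aa/bb/pp are the persistent state slots.
static float wkv_step(float k, float v, float time_first, float time_decay,
                      float * aa, float * bb, float * pp) {
    // output: wkv = (e1*aa + e2*v) / (e1*bb + e2), with e1/e2 rescaled by the shared max qq
    float ww = time_first + k;
    float qq = std::max(*pp, ww);
    float e1 = expf(*pp - qq);
    float e2 = expf(ww - qq);
    const float wkv = (e1 * (*aa) + e2 * v) / (e1 * (*bb) + e2);
    // state update: decay the running sums and fold in the current key/value
    ww = *pp + time_decay;
    qq = std::max(ww, k);
    e1 = expf(ww - qq);
    e2 = expf(k - qq);
    *aa = e1 * (*aa) + e2 * v;
    *bb = e1 * (*bb) + e2;
    *pp = qq;
    return wkv;
}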
@@ -492,42 +492,42 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
 // FFN/channel mixing
 {
 // self.layer_norm(x, self.w.blocks[i].ln2)
-struct ggml_tensor * x0 = rwkv_layer_norm(ctx, x, layer.ln2_weight, layer.ln2_bias);
+struct ggml_v2_tensor * x0 = rwkv_layer_norm(ctx, x, layer.ln2_weight, layer.ln2_bias);
 // state[5 * i + 0]
-struct ggml_tensor * x_prev = ggml_view_1d(ctx, state, n_embed, (5 * i + 0) * n_embed * sizeof(float));
+struct ggml_v2_tensor * x_prev = ggml_v2_view_1d(ctx, state, n_embed, (5 * i + 0) * n_embed * sizeof(float));
 // xk = x * time_mix_k + state[5 * i + 0] * (1 - time_mix_k)
 // xr = x * time_mix_r + state[5 * i + 0] * (1 - time_mix_r)
-struct ggml_tensor * xk = ggml_add(
+struct ggml_v2_tensor * xk = ggml_v2_add(
 ctx,
-ggml_mul(ctx, x0, layer.ffn_time_mix_k),
-ggml_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.ffn_time_mix_k))
+ggml_v2_mul(ctx, x0, layer.ffn_time_mix_k),
+ggml_v2_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.ffn_time_mix_k))
 );
-struct ggml_tensor * xr = ggml_add(
+struct ggml_v2_tensor * xr = ggml_v2_add(
 ctx,
-ggml_mul(ctx, x0, layer.ffn_time_mix_r),
-ggml_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.ffn_time_mix_r))
+ggml_v2_mul(ctx, x0, layer.ffn_time_mix_r),
+ggml_v2_mul(ctx, x_prev, rwkv_1_minus_x(ctx, layer.ffn_time_mix_r))
 );
 // state[5 * i + 0] = x
 state_parts[5 * i + 0] = x0;

 // r = torch.sigmoid(rw @ xr)
-struct ggml_tensor * r = rwkv_sigmoid(
+struct ggml_v2_tensor * r = rwkv_sigmoid(
 ctx,
-ggml_mul_mat(ctx, layer.ffn_receptance, xr)
+ggml_v2_mul_mat(ctx, layer.ffn_receptance, xr)
 );
 // k = torch.square(torch.relu(kw @ xk))
-struct ggml_tensor * k = ggml_sqr(ctx, ggml_relu(
+struct ggml_v2_tensor * k = ggml_v2_sqr(ctx, ggml_v2_relu(
 ctx,
-ggml_mul_mat(ctx, layer.ffn_key, xk)
+ggml_v2_mul_mat(ctx, layer.ffn_key, xk)
 ));
 // r * (vw @ k)
-x = ggml_add(
+x = ggml_v2_add(
 ctx,
 x,
-ggml_mul(
+ggml_v2_mul(
 ctx,
 r,
-ggml_mul_mat(ctx, layer.ffn_value, k)
+ggml_v2_mul_mat(ctx, layer.ffn_value, k)
 )
 );
 }
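Channel mixing first blends the current activation with the previous token's via the learned ffn_time_mix factors, then gates a squared-ReLU feed-forward by a sigmoid receptance: out = sigmoid(rw·xr) * (vw · relu(kw·xk)^2). The element-wise blend is the only stateful part; a one-line sketch of it (illustrative only):

// Token-shift blend used for xk and xr above: x * mix + x_prev * (1 - mix).
static inline float time_mix(float x, float x_prev, float mix) {
    return x * mix + x_prev * (1.0f - mix);
}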
@@ -537,14 +537,14 @@ struct rwkv_context * rwkv_init_from_file(const char * file_path, uint32_t n_thr
 x = rwkv_layer_norm(ctx, x, model->ln_out_weight, model->ln_out_bias);

 // x = (self.w.head.weight @ x).float()
-struct ggml_tensor * logits = ggml_mul_mat(ctx, model->head, x);
+struct ggml_v2_tensor * logits = ggml_v2_mul_mat(ctx, model->head, x);

-struct ggml_cgraph * graph = (struct ggml_cgraph *) calloc(1, sizeof(struct ggml_cgraph));
+struct ggml_v2_cgraph * graph = (struct ggml_v2_cgraph *) calloc(1, sizeof(struct ggml_v2_cgraph));

-*graph = ggml_build_forward(logits);
+*graph = ggml_v2_build_forward(logits);

 for (int i = 0; i < n_layer * 5; i++) {
-ggml_build_forward_expand(graph, state_parts[i]);
+ggml_v2_build_forward_expand(graph, state_parts[i]);
 }

 graph->n_threads = n_threads;

@@ -578,15 +578,15 @@ bool rwkv_eval(struct rwkv_context * ctx, int32_t token, float * state_in, float

 RWKV_ASSERT_FALSE(token >= 0 && token < n_vocab, "Token is out of range 0..%d", n_vocab - 1);

-ggml_set_i32_1d(ctx->token_index, 0, token);
+ggml_v2_set_i32_1d(ctx->token_index, 0, token);

 if (state_in == NULL) {
-ggml_set_f32(ctx->state, 0.0F);
+ggml_v2_set_f32(ctx->state, 0.0F);

 for (int i = 0; i < n_layer; i++) {
 // state[5 * i + 4] = -1e30
-ggml_set_f32(
-ggml_view_1d(ctx->ctx, ctx->state, n_embed, (5 * i + 4) * n_embed * sizeof(float)),
+ggml_v2_set_f32(
+ggml_v2_view_1d(ctx->ctx, ctx->state, n_embed, (5 * i + 4) * n_embed * sizeof(float)),
 -1e30F
 );
 }
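A fresh state therefore starts at zero except for the pp slots, which are set to -1e30 so they behave as minus infinity for the running-max trick: the very first real key then immediately dominates the maximum. A hypothetical helper doing the same initialization outside the graph, for illustration:

// Hypothetical fresh-state initializer matching the loop above.
static void rwkv_state_reset(float * state, int n_layer, int n_embed) {
    for (int i = 0; i < n_layer * 5 * n_embed; ++i) state[i] = 0.0f;
    for (int l = 0; l < n_layer; ++l) {
        float * pp = state + (5 * l + 4) * n_embed;   // state[5 * i + 4]
        for (int j = 0; j < n_embed; ++j) pp[j] = -1e30f;
    }
}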
@@ -594,10 +594,10 @@ bool rwkv_eval(struct rwkv_context * ctx, int32_t token, float * state_in, float
 memcpy(ctx->state->data, state_in, ctx->state->ne[0] * sizeof(float));
 }

-ggml_graph_compute(ctx->ctx, ctx->graph);
+ggml_v2_graph_compute(ctx->ctx, ctx->graph);

 for (size_t i = 0; i < size_t(n_layer * 5); i++) {
-struct ggml_tensor * part = ctx->state_parts[i];
+struct ggml_v2_tensor * part = ctx->state_parts[i];

 memcpy(state_out + i * n_embed, part->data, part->ne[0] * sizeof(float));
 }

@@ -611,7 +611,7 @@ void rwkv_free(struct rwkv_context * ctx) {
 ctx->model->layers.~vector();
 free(ctx->model);
 delete[] ctx->state_parts;
-ggml_free(ctx->ctx);
+ggml_v2_free(ctx->ctx);
 free(ctx->graph);
 free(ctx);
 }

@@ -621,15 +621,15 @@ bool rwkv_quantize_model_file(const char * model_file_path_in, const char * mode

 RWKV_ASSERT_FALSE(format_type != -1, "Unsupported format \"%s\"", format_name);

-ggml_type type = FORMAT_TYPE_TO_GGML_TYPE[format_type];
+ggml_v2_type type = FORMAT_TYPE_TO_GGML_V2_TYPE[format_type];

-RWKV_ASSERT_FALSE(type != GGML_TYPE_UNKNOWN, "Unsupported format \"%s\"", format_name);
+RWKV_ASSERT_FALSE(type != GGML_V2_TYPE_UNKNOWN, "Unsupported format \"%s\"", format_name);

 // Needed to initialize FP16 lookup table
 {
-struct ggml_init_params params = { 0, NULL, false };
-struct ggml_context * ctx = ggml_init(params);
-ggml_free(ctx);
+struct ggml_v2_init_params params = { 0, NULL, false };
+struct ggml_v2_context * ctx = ggml_v2_init(params);
+ggml_v2_free(ctx);
 }

 printf("Loading model from '%s'\n", model_file_path_in);

@@ -680,7 +680,7 @@ bool rwkv_quantize_model_file(const char * model_file_path_in, const char * mode
 std::vector<float> work;

 std::vector<uint8_t> data_u8;
-std::vector<ggml_fp16_t> data_f16;
+std::vector<ggml_v2_fp16_t> data_f16;
 std::vector<float> data_f32;

 std::vector<int64_t> hist_all(1 << 4, 0);

@@ -700,9 +700,9 @@ bool rwkv_quantize_model_file(const char * model_file_path_in, const char * mode

 RWKV_ASSERT_FALSE(parameter_data_type >= 0 && parameter_data_type < FORMAT_TYPE_COUNT, "Invalid parameter data type %d", parameter_data_type);

-ggml_type parameter_ggml_type = FORMAT_TYPE_TO_GGML_TYPE[parameter_data_type];
+ggml_v2_type parameter_ggml_v2_type = FORMAT_TYPE_TO_GGML_V2_TYPE[parameter_data_type];

-RWKV_ASSERT_FALSE(parameter_ggml_type != GGML_TYPE_UNKNOWN, "Invalid parameter data type %d", parameter_data_type);
+RWKV_ASSERT_FALSE(parameter_ggml_v2_type != GGML_V2_TYPE_UNKNOWN, "Invalid parameter data type %d", parameter_data_type);

 int32_t nelements = 1;
 int32_t ne[2] = { 1, 1 };

@@ -715,9 +715,9 @@ bool rwkv_quantize_model_file(const char * model_file_path_in, const char * mode
 finp.read(&name[0], key_length);

 {
-printf("%48s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ggml_type_name(parameter_ggml_type));
+printf("%48s - [%5d, %5d], type = %6s ", name.data(), ne[0], ne[1], ggml_v2_type_name(parameter_ggml_v2_type));

-total_size_orig += (size_t) (nelements * ggml_type_sizef(parameter_ggml_type));
+total_size_orig += (size_t) (nelements * ggml_v2_type_sizef(parameter_ggml_v2_type));
 }

 // Quantize only 2D tensors, except embedding and head matrices.

@@ -736,10 +736,10 @@ bool rwkv_quantize_model_file(const char * model_file_path_in, const char * mode

 if (parameter_data_type == 1) {
 data_f16.resize(nelements);
-finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_fp16_t));
+finp.read(reinterpret_cast<char *>(data_f16.data()), nelements * sizeof(ggml_v2_fp16_t));
 data_f32.resize(nelements);
 for (int i = 0; i < nelements; ++i) {
-data_f32[i] = ggml_fp16_to_fp32(data_f16[i]);
+data_f32[i] = ggml_v2_fp16_to_fp32(data_f16[i]);
 }
 } else {
 data_f32.resize(nelements);

@@ -772,23 +772,23 @@ bool rwkv_quantize_model_file(const char * model_file_path_in, const char * mode
 std::vector<int64_t> hist_cur(1 << 4, 0);

 switch (type) {
-case GGML_TYPE_Q4_0:
-cur_size = ggml_quantize_q4_0_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+case GGML_V2_TYPE_Q4_0:
+cur_size = ggml_v2_quantize_q4_0_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
 break;
-case GGML_TYPE_Q4_1:
-cur_size = ggml_quantize_q4_1_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+case GGML_V2_TYPE_Q4_1:
+cur_size = ggml_v2_quantize_q4_1_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
 break;
-case GGML_TYPE_Q4_2:
-cur_size = ggml_quantize_q4_2_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+case GGML_V2_TYPE_Q4_2:
+cur_size = ggml_v2_quantize_q4_2_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
 break;
-case GGML_TYPE_Q5_0:
-cur_size = ggml_quantize_q5_0_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+case GGML_V2_TYPE_Q5_0:
+cur_size = ggml_v2_quantize_q5_0_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
 break;
-case GGML_TYPE_Q5_1:
-cur_size = ggml_quantize_q5_1_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+case GGML_V2_TYPE_Q5_1:
+cur_size = ggml_v2_quantize_q5_1_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
 break;
-case GGML_TYPE_Q8_0:
-cur_size = ggml_quantize_q8_0_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
+case GGML_V2_TYPE_Q8_0:
+cur_size = ggml_v2_quantize_q8_0_v2(data_f32.data(), work.data(), nelements, ne[0], hist_cur.data());
 break;
 default: {
 fprintf(stderr, "unsupported quantization type %d\n", type);

@@ -848,18 +848,18 @@ const char * rwkv_get_system_info_string(void) {
 static std::string s;

 s = "";
-s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | ";
-s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | ";
-s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | ";
-s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | ";
-s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | ";
-s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | ";
-s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | ";
-s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | ";
-s += "WASM_SIMD = " + std::to_string(ggml_cpu_has_wasm_simd()) + " | ";
-s += "BLAS = " + std::to_string(ggml_cpu_has_blas()) + " | ";
-s += "SSE3 = " + std::to_string(ggml_cpu_has_sse3()) + " | ";
-s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
+s += "AVX = " + std::to_string(ggml_v2_cpu_has_avx()) + " | ";
+s += "AVX2 = " + std::to_string(ggml_v2_cpu_has_avx2()) + " | ";
+s += "AVX512 = " + std::to_string(ggml_v2_cpu_has_avx512()) + " | ";
+s += "FMA = " + std::to_string(ggml_v2_cpu_has_fma()) + " | ";
+s += "NEON = " + std::to_string(ggml_v2_cpu_has_neon()) + " | ";
+s += "ARM_FMA = " + std::to_string(ggml_v2_cpu_has_arm_fma()) + " | ";
+s += "F16C = " + std::to_string(ggml_v2_cpu_has_f16c()) + " | ";
+s += "FP16_VA = " + std::to_string(ggml_v2_cpu_has_fp16_va()) + " | ";
+s += "WASM_SIMD = " + std::to_string(ggml_v2_cpu_has_wasm_simd()) + " | ";
+s += "BLAS = " + std::to_string(ggml_v2_cpu_has_blas()) + " | ";
+s += "SSE3 = " + std::to_string(ggml_v2_cpu_has_sse3()) + " | ";
+s += "VSX = " + std::to_string(ggml_v2_cpu_has_vsx()) + " | ";

 return s.c_str();
 }