added MPT support

This commit is contained in:
Concedo 2023-06-03 16:14:08 +08:00
parent 9839259b63
commit 6f82e17b7a
10 changed files with 983 additions and 48 deletions

View file

@ -1,6 +1,6 @@
default: koboldcpp koboldcpp_failsafe koboldcpp_openblas koboldcpp_openblas_noavx2 koboldcpp_clblast
simple: koboldcpp koboldcpp_failsafe
tools: quantize_gpt2 quantize_gptj quantize_llama quantize_neox quantize_mpt
dev: koboldcpp_openblas
dev2: koboldcpp_clblast
@ -281,7 +281,7 @@ gpttype_adapter_clblast.o: gpttype_adapter.cpp
$(CXX) $(CXXFLAGS) $(CLBLAST_FLAGS) -c $< -o $@
clean:
rm -vf *.o main quantize_llama quantize_gpt2 quantize_gptj quantize_neox quantize_mpt quantize-stats perplexity embedding benchmark-matmult save-load-state main.exe quantize_llama.exe quantize_gptj.exe quantize_gpt2.exe quantize_neox.exe quantize_mpt.exe koboldcpp.dll koboldcpp_openblas.dll koboldcpp_failsafe.dll koboldcpp_openblas_noavx2.dll koboldcpp_clblast.dll koboldcpp.so koboldcpp_openblas.so koboldcpp_failsafe.so koboldcpp_openblas_noavx2.so koboldcpp_clblast.so
main: examples/main/main.cpp build-info.h ggml.o llama.o common.o $(OBJS)
$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
@ -308,6 +308,8 @@ quantize_gpt2: ggml.o llama.o otherarch/tools/gpt2_quantize.cpp otherarch/tools/
$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
quantize_neox: ggml.o llama.o otherarch/tools/neox_quantize.cpp otherarch/tools/common-ggml.cpp
$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
quantize_mpt: ggml.o llama.o otherarch/tools/mpt_quantize.cpp otherarch/tools/common-ggml.cpp
$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
quantize-stats: examples/quantize-stats/quantize-stats.cpp ggml.o llama.o $(OBJS)
$(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS)
perplexity: examples/perplexity/perplexity.cpp ggml.o llama.o common.o $(OBJS)

View file

@ -175,6 +175,19 @@ extern "C"
return true;
}
}
else if(file_format==FileFormat::MPT_1)
{
printf("\n---\nIdentified as MPT model: (ver %d)\nAttempting to Load...\n---\n", file_format);
ModelLoadResult lr = gpttype_load_model(inputs, file_format);
if (lr == ModelLoadResult::FAIL || lr == ModelLoadResult::RETRY_LOAD)
{
return false;
}
else
{
return true;
}
}
else
{
printf("\n---\nIdentified as LLAMA model: (ver %d)\nAttempting to Load...\n---\n", file_format);

View file

@ -26,6 +26,7 @@
#include "rwkv_v3.cpp" #include "rwkv_v3.cpp"
#include "neox_v2.cpp" #include "neox_v2.cpp"
#include "neox_v3.cpp" #include "neox_v3.cpp"
#include "mpt_v3.cpp"
//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
@ -44,6 +45,8 @@ static gpt2_model gpt2_ctx_v3;
static gpt_neox_v2_model neox_ctx_v2;
static gpt_neox_model neox_ctx_v3;
static mpt_model mpt_ctx_v3;
static rwkv_v2_context * rwkv_ctx_v2;
static rwkv_context * rwkv_ctx_v3;
static llama_v2_context_params llama_ctx_params_v2;
@ -298,7 +301,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
params.n_ctx = inputs.max_context_length;
neox_ctx_v2.hparams.n_ctx = gptj_ctx_v1.hparams.n_ctx = gptj_ctx_v2.hparams.n_ctx = gpt2_ctx_v1.hparams.n_ctx = gpt2_ctx_v2.hparams.n_ctx
= neox_ctx_v3.hparams.n_ctx = gptj_ctx_v3.hparams.n_ctx = gptj_ctx_v3.hparams.n_ctx = mpt_ctx_v3.hparams.n_ctx = params.n_ctx;
printf("System Info: %s\n", llama_print_system_info());
SetQuantsUnshuffled(false);
@ -682,6 +685,19 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
}
}
else if(file_format==FileFormat::MPT_1)
{
bool res = mpt_model_load(params.model, mpt_ctx_v3, vocab);
if(res==false)
{
fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
return ModelLoadResult::FAIL;
}
// determine the required inference memory per token:
mpt_eval(mpt_ctx_v3, params.n_threads, 0, { 0, 1, 2, 3 }, logits, false, mem_per_token);
return ModelLoadResult::SUCCESS;
}
else
{
printf("\nUnknown Model, cannot load.\n");
@ -869,6 +885,10 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
{
n_vocab = neox_ctx_v3.hparams.n_vocab;
}
else if( file_format==FileFormat::MPT_1)
{
n_vocab = mpt_ctx_v3.hparams.n_vocab;
}
else if(file_format == FileFormat::RWKV_1 || file_format==FileFormat::RWKV_2)
{
n_vocab = vocab.id_to_token.size(); //handled separately
@ -1006,6 +1026,10 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
{
evalres = gptj_eval(gptj_ctx_v3, params.n_threads, n_past, embd, logits, mem_per_token);
}
else if(file_format==FileFormat::MPT_1)
{
evalres = mpt_eval(mpt_ctx_v3, params.n_threads, n_past, embd, logits, false, mem_per_token);
}
else
{
printf("\nCannot find eval function\n");
@ -1098,7 +1122,8 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
file_format == FileFormat::NEOX_4 ||
file_format == FileFormat::NEOX_5 ||
file_format == FileFormat::NEOX_6 ||
file_format == FileFormat::NEOX_7 ||
file_format == FileFormat::MPT_1)
{
eosID = 0;
int topid = std::min_element(logits.begin(),logits.end())-logits.begin();

View file

@ -98,7 +98,11 @@ void print_tok_vec(std::vector<float> &embd)
//we need to read more to determine
int32_t vocabsiz = 0;
fin.read((char *) &vocabsiz, sizeof(int32_t));
if(vocabsiz==4096) //actually the d_model for mpt
{
fileformat = FileFormat::MPT_1;
}
else if(vocabsiz==50400) //know GPT-J vocab size
{
fileformat = FileFormat::GPTJ_1;
uint32_t temp;

View file

@ -43,6 +43,8 @@ enum FileFormat
NEOX_5=404, //unshuffled redpajama
NEOX_6=405, //using 16bit scalar
NEOX_7=406, //using 16bit scalar redpajama
MPT_1=500, //first supported mpt version
};
enum ModelLoadResult

otherarch/mpt_v3.cpp (new file, 516 lines)
View file

@ -0,0 +1,516 @@
#include "ggml.h"
#include "otherarch.h"
#include "utils.h"
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <map>
#include <string>
#include <vector>
#include <iostream>
#include "model_adapter.h"
// load the model's weights from a file
bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vocab) {
printf("%s: loading model from '%s' - please wait ...\n", __func__, fname.c_str());
auto fin = std::ifstream(fname, std::ios::binary);
if (!fin) {
fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
return false;
}
// verify magic
{
uint32_t magic;
fin.read((char *)&magic, sizeof(magic));
if (magic != 0x67676d6c) {
fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
return false;
}
}
// load hparams
{
auto & hparams = model.hparams;
fin.read((char *) &hparams.d_model, sizeof(hparams.d_model));
fin.read((char *) &hparams.max_seq_len, sizeof(hparams.max_seq_len));
fin.read((char *) &hparams.n_heads, sizeof(hparams.n_heads));
fin.read((char *) &hparams.n_layers, sizeof(hparams.n_layers));
fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
fin.read((char *) &hparams.alibi_bias_max, sizeof(hparams.alibi_bias_max));
fin.read((char *) &hparams.clip_qkv, sizeof(hparams.clip_qkv));
fin.read((char *) &hparams.ftype, sizeof(hparams.ftype));
hparams.n_ctx = std::min(hparams.max_seq_len, hparams.n_ctx);
const int32_t qntvr = hparams.ftype / GGML_QNT_VERSION_FACTOR;
printf("%s: d_model = %d\n", __func__, hparams.d_model);
printf("%s: max_seq_len = %d\n", __func__, hparams.max_seq_len);
printf("%s: n_ctx = %d\n", __func__, hparams.n_ctx);
printf("%s: n_heads = %d\n", __func__, hparams.n_heads);
printf("%s: n_layers = %d\n", __func__, hparams.n_layers);
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
printf("%s: alibi_bias_max = %f\n", __func__, hparams.alibi_bias_max);
printf("%s: clip_qkv = %f\n", __func__, hparams.clip_qkv);
printf("%s: ftype = %d\n", __func__, hparams.ftype);
printf("%s: qntvr = %d\n", __func__, qntvr);
hparams.ftype %= GGML_QNT_VERSION_FACTOR;
}
// load vocab
{
const int32_t n_vocab = model.hparams.n_vocab;
std::string word;
std::vector<char> buf(128);
for (int i = 0; i < n_vocab; i++) {
uint32_t len;
fin.read((char *) &len, sizeof(len));
buf.resize(len);
fin.read((char *) buf.data(), len);
word.assign(buf.data(), len);
vocab.token_to_id[word] = i;
vocab.id_to_token[i] = word;
}
}
// for the big tensors, we have the option to store the data in 16-bit
// floats or quantized in order to save memory and also to speed up the
// computation
ggml_type wtype = ggml_ftype_to_ggml_type((ggml_ftype)(model.hparams.ftype));
if (wtype == GGML_TYPE_COUNT) {
fprintf(stderr, "%s: invalid model file '%s' (bad ftype value %d)\n", __func__, fname.c_str(),
model.hparams.ftype);
return false;
}
auto & ctx = model.ctx;
size_t ctx_size = 0;
const auto & hparams = model.hparams;
const size_t n_ctx = hparams.n_ctx;
{
const size_t n_embd = hparams.d_model;
const size_t n_layer = hparams.n_layers;
const size_t n_vocab = hparams.n_vocab;
ctx_size += n_embd * n_vocab * ggml_type_sizef(wtype); // wte_weight
ctx_size += n_embd * ggml_type_sizef(GGML_TYPE_F32); // norm_f_weight
ctx_size += n_layer * (n_embd * ggml_type_sizef(GGML_TYPE_F32)); // ln_1_weight
ctx_size += n_layer * (3 * n_embd * n_embd * ggml_type_sizef(wtype)); // attn_Wqkv_weight
ctx_size += n_layer * (n_embd * n_embd * ggml_type_sizef(wtype)); // attn_out_proj_weight
ctx_size += n_layer * (n_embd * ggml_type_sizef(GGML_TYPE_F32)); // ln_2_weight
ctx_size += n_layer * (4 * n_embd * n_embd * ggml_type_sizef(wtype)); // mlp_mlp_up_weight
ctx_size += n_layer * (n_embd * n_embd * 4 * ggml_type_sizef(wtype)); // mlp_mlp_down_weight
ctx_size += n_ctx * n_layer * n_embd * ggml_type_sizef(GGML_TYPE_F16); // memory_k
ctx_size += n_ctx * n_layer * n_embd * ggml_type_sizef(GGML_TYPE_F16); // memory_v
ctx_size += (1 + 6 * n_layer) * 512; // object overhead
printf("%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size / (1024.0 * 1024.0));
}
// create the ggml context
{
struct ggml_init_params params = {
.mem_size = ctx_size,
.mem_buffer = NULL,
.no_alloc = false,
};
model.ctx = ggml_init(params);
if (!model.ctx) {
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
return false;
}
}
// prepare memory for the weights
{
const auto & hparams = model.hparams;
const size_t n_embd = hparams.d_model;
const size_t n_layer = hparams.n_layers;
const size_t n_vocab = hparams.n_vocab;
model.layers.resize(n_layer);
model.wte_weight = ggml_new_tensor_2d(ctx, wtype, n_embd, n_vocab);
model.norm_f_weight = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
// map by name
model.tensors["transformer.wte.weight"] = model.wte_weight;
model.tensors["transformer.norm_f.weight"] = model.norm_f_weight;
for (int i = 0; i < (int) n_layer; ++i) {
auto & layer = model.layers[i];
layer.norm_1_weight = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.c_attn_wqkv_weight = ggml_new_tensor_2d(ctx, wtype, n_embd, 3 * n_embd);
layer.c_attn_out_proj_weight = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
layer.norm_2_weight = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
layer.ffn_up_proj = ggml_new_tensor_2d(ctx, wtype, n_embd, 4 * n_embd);
layer.ffn_down_proj = ggml_new_tensor_2d(ctx, wtype, 4 * n_embd, n_embd);
// map by name
model.tensors["transformer.blocks." + std::to_string(i) + ".norm_1.weight"] = layer.norm_1_weight;
model.tensors["transformer.blocks." + std::to_string(i) + ".attn.Wqkv.weight"] = layer.c_attn_wqkv_weight;
model.tensors["transformer.blocks." + std::to_string(i) + ".attn.out_proj.weight"] = layer.c_attn_out_proj_weight;
model.tensors["transformer.blocks." + std::to_string(i) + ".norm_2.weight"] = layer.norm_2_weight;
model.tensors["transformer.blocks." + std::to_string(i) + ".ffn.up_proj.weight"] = layer.ffn_up_proj;
model.tensors["transformer.blocks." + std::to_string(i) + ".ffn.down_proj.weight"] = layer.ffn_down_proj;
}
}
// key + value memory
{
const auto & hparams = model.hparams;
const size_t n_embd = hparams.d_model;
const size_t n_layer = hparams.n_layers;
const int64_t n_mem = n_layer * n_ctx;
const int64_t n_elements = n_embd * n_mem;
model.memory_k = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
model.memory_v = ggml_new_tensor_1d(ctx, GGML_TYPE_F16, n_elements);
const size_t memory_size = ggml_nbytes(model.memory_k) + ggml_nbytes(model.memory_v);
printf("%s: memory_size = %8.2f MB, n_mem = %" PRId64 "\n", __func__, memory_size / 1024.0 / 1024.0, n_mem);
}
// load weights
{
int n_tensors = 0;
size_t total_size = 0;
printf("%s: ", __func__);
while (true) {
int32_t n_dims;
int32_t length;
int32_t ttype;
fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
fin.read(reinterpret_cast<char *>(&length), sizeof(length));
fin.read(reinterpret_cast<char *>(&ttype), sizeof(ttype));
if (fin.eof()) {
break;
}
int32_t nelements = 1;
int32_t ne[2] = {1, 1};
for (int i = 0; i < n_dims; ++i) {
fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
nelements *= ne[i];
}
std::string name(length, 0);
fin.read(&name[0], length);
if (model.tensors.find(name.data()) == model.tensors.end()) {
fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
return false;
}
auto tensor = model.tensors[name.data()];
if (ggml_nelements(tensor) != nelements) {
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
return false;
}
if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
fprintf(stderr,
"%s: tensor '%s' has wrong shape in model file: got [%5d, "
"%5d], expected [%5d, %5d]\n",
__func__, name.data(), (int)tensor->ne[0], (int)tensor->ne[1], ne[0], ne[1]);
return false;
}
// for debugging
if (0) {
printf("%24s - [%5d, %5d], type = %6s, %6.2f MB, %9zu bytes\n", name.data(), ne[0], ne[1],
ggml_type_name(ggml_type(ttype)), ggml_nbytes(tensor) / 1024.0 / 1024.0, ggml_nbytes(tensor));
}
const size_t bpe = ggml_type_size(ggml_type(ttype));
if ((nelements * bpe) / ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
fprintf(stderr,
"%s: tensor '%s' has wrong size in model file: got %zu, "
"expected %zu\n",
__func__, name.data(), ggml_nbytes(tensor), nelements * bpe);
return false;
}
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
total_size += ggml_nbytes(tensor);
if (++n_tensors % 8 == 0) {
printf(".");
fflush(stdout);
}
}
printf(" done\n");
printf("%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size / 1024.0 / 1024.0, n_tensors);
}
fin.close();
return true;
}
// evaluate the transformer
//
// - model: the model
// - n_threads: number of threads to use
// - n_past: the context size so far
// - embd_inp: the embeddings of the tokens in the context
// - embd_w: the predicted logits for the next token
//
bool mpt_eval(const mpt_model & model, const int n_threads, const int n_past,
const std::vector<gpt_vocab::id> & embd_inp, std::vector<float> & embd_w, bool logits_all, size_t & mem_per_token) {
const int N = embd_inp.size();
const auto & hparams = model.hparams;
const int n_embd = hparams.d_model;
const int n_layer = hparams.n_layers;
const int n_head = hparams.n_heads;
const int n_vocab = hparams.n_vocab;
const int n_ctx = hparams.n_ctx;
static size_t buf_size = 256u * 1024 * 1024;
static void * buf = malloc(buf_size);
// use 2 scratch buffers
// TODO: very hacky solution - reimplement in a more elegant way
static size_t scr0_size = 256u*1024*1024;
static void * scr0 = malloc(scr0_size);
static size_t scr1_size = 256u*1024*1024;
static void * scr1 = malloc(scr1_size);
if (mem_per_token > 0 && mem_per_token * N > buf_size) {
const size_t buf_size_new = 1.1 * (mem_per_token * N); // add 10% to account for ggml object overhead
// printf("\n%s: reallocating buffer from %zu to %zu bytes\n", __func__,
// buf_size, buf_size_new);
// reallocate
buf_size = buf_size_new;
buf = realloc(buf, buf_size);
if (buf == nullptr) {
fprintf(stderr, "%s: failed to allocate %zu bytes\n", __func__, buf_size);
return false;
}
}
struct ggml_init_params params;
params.mem_size = buf_size;
params.mem_buffer = buf;
params.no_alloc = false;
struct ggml_context * ctx0 = ggml_init(params);
struct ggml_cgraph gf = {.n_threads = n_threads};
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
memcpy(embd->data, embd_inp.data(), N * ggml_element_size(embd));
struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.wte_weight, embd);
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * cur;
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
// a = self.ln_1(x)
{
cur = ggml_norm(ctx0, inpL);
cur = ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].norm_1_weight, cur), cur);
}
// self-attention
// b, _, past_key_value = self.attn(a, past_key_value=past_key_value,
// attn_bias=attn_bias, attention_mask=attention_mask,
// is_causal=is_causal)
{
// compute QKV
cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_wqkv_weight, cur);
if (model.hparams.clip_qkv > 0.0f) {
cur = ggml_clamp(ctx0, cur, -model.hparams.clip_qkv, model.hparams.clip_qkv);
}
struct ggml_tensor * Qcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 0 * sizeof(float) * n_embd);
struct ggml_tensor * Kcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 1 * sizeof(float) * n_embd);
struct ggml_tensor * Vcur = ggml_view_2d(ctx0, cur, n_embd, N, cur->nb[1], 2 * sizeof(float) * n_embd);
// store key and value to memory
{
struct ggml_tensor * k =
ggml_view_1d(ctx0, model.memory_k, N * n_embd,
(ggml_element_size(model.memory_k) * n_embd) * (il * n_ctx + n_past));
struct ggml_tensor * v =
ggml_view_1d(ctx0, model.memory_v, N * n_embd,
(ggml_element_size(model.memory_v) * n_embd) * (il * n_ctx + n_past));
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Kcur, k));
ggml_build_forward_expand(&gf, ggml_cpy(ctx0, Vcur, v));
}
// Q = Qcur.contiguous().view(n_embd/n_head, n_head, N).permute(0,
// 2, 1, 3) [64, N, 12]
struct ggml_tensor * Q = ggml_permute(
ctx0, ggml_cpy(ctx0, Qcur, ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_embd / n_head, n_head, N)), 0, 2,
1, 3);
// K = Kmem.view(n_embd/n_head, n_head, n_past + N).permute(0, 2, 1,
// 3) [64, n_past + N, 12]
struct ggml_tensor * K =
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_k, (n_past + N) * n_embd,
il * n_ctx * ggml_element_size(model.memory_k) * n_embd),
n_embd / n_head, n_head, n_past + N),
0, 2, 1, 3);
// K * Q
struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
// KQ_scaled = KQ / sqrt(n_embd/n_head)
struct ggml_tensor * KQ_scaled =
ggml_scale(ctx0, KQ, ggml_new_f32(ctx0, 1.0f / sqrt(float(n_embd) / n_head)));
struct ggml_tensor * KQ_scaled_alibi =
ggml_alibi(ctx0, KQ_scaled, n_past, n_head, model.hparams.alibi_bias_max);
// KQ_masked = mask_past(KQ_scaled)
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled_alibi, n_past);
// KQ = soft_max(KQ_masked)
struct ggml_tensor * KQ_soft_max = ggml_soft_max(ctx0, KQ_masked);
// V_trans = Vmem.view(n_embd/n_head, n_head, n_past + N).permute(1,
// 2, 0, 3).contiguous() [n_past + N, 64, 12]
struct ggml_tensor * V_trans = ggml_cpy(
ctx0,
ggml_permute(ctx0,
ggml_reshape_3d(ctx0,
ggml_view_1d(ctx0, model.memory_v, (n_past + N) * n_embd,
il * n_ctx * ggml_element_size(model.memory_v) * n_embd),
n_embd / n_head, n_head, n_past + N),
1, 2, 0, 3),
ggml_new_tensor_3d(ctx0, model.memory_v->type, n_past + N, n_embd / n_head, n_head));
// KQV = transpose(V) * KQ_soft_max
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);
// KQV_merged = KQV.permute(0, 2, 1, 3)
struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
// cur = KQV_merged.contiguous().view(n_embd, N)
cur = ggml_cpy(ctx0, KQV_merged, ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, N));
// projection
{ cur = ggml_mul_mat(ctx0, model.layers[il].c_attn_out_proj_weight, cur); }
}
inpL = ggml_add(ctx0, inpL, cur);
ggml_set_scratch(ctx0, { 0, scr1_size, scr1, });
// m = self.ln_2(x)
{
cur = ggml_norm(ctx0, inpL);
cur = ggml_mul(ctx0, ggml_repeat(ctx0, model.layers[il].norm_2_weight, cur), cur);
}
// n = self.mlp(m)
{
cur = ggml_mul_mat(ctx0, model.layers[il].ffn_up_proj, cur);
// GELU activation
cur = ggml_gelu(ctx0, cur);
// projection
// cur = proj_w*cur + proj_b
cur = ggml_mul_mat(ctx0, model.layers[il].ffn_down_proj, cur);
}
// x = x + n
inpL = ggml_add(ctx0, inpL, cur);
}
ggml_set_scratch(ctx0, { 0, scr0_size, scr0, });
// norm
{
inpL = ggml_norm(ctx0, inpL);
// inpL = ln_f_g*inpL
inpL = ggml_mul(ctx0, ggml_repeat(ctx0, model.norm_f_weight, inpL), inpL);
}
ggml_set_scratch(ctx0, { 0, 0, nullptr, });
// output embedding weight tied to input embedding
inpL = ggml_mul_mat(ctx0, model.wte_weight, inpL);
// logits -> probs
// inpL = ggml_soft_max(ctx0, inpL);
// run the computation
ggml_build_forward_expand(&gf, inpL);
ggml_graph_compute(ctx0, &gf);
// std::cout << "Qcur" << std::endl;
// print_tensor(Qcur);
// if (n_past%100 == 0) {
// ggml_graph_print(&gf);
// ggml_graph_dump_dot(&gf, NULL, "mpt-model.dot");
// }
if (logits_all) {
// return result for all tokens
embd_w.resize(n_vocab * N);
memcpy(embd_w.data(), (float *) ggml_get_data(inpL), sizeof(float) * n_vocab * N);
} else {
// return result for just the last token
embd_w.resize(n_vocab);
memcpy(embd_w.data(), (float *)ggml_get_data(inpL) + (n_vocab * (N - 1)), sizeof(float) * n_vocab);
}
if (mem_per_token == 0) {
mem_per_token = ggml_used_mem(ctx0) / N;
}
// printf("used_mem = %zu\n", ggml_used_mem(ctx0));
ggml_free(ctx0);
return true;
}
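For orientation only, here is a hypothetical standalone driver (not part of this commit) showing how the two entry points defined above, mpt_model_load and mpt_eval, could be exercised outside of gpttype_adapter.cpp. The model path, thread count, and the assumption that the declarations are reachable through model_adapter.h are illustrative.
// Hypothetical sketch, not part of this commit: load an MPT ggml model and run
// one warm-up evaluation, mirroring the gpttype_load_model call pattern.
#include <cstdio>
#include <vector>
#include "ggml.h"
#include "otherarch.h"
#include "model_adapter.h" // assumed to expose mpt_model_load / mpt_eval
int main() {
    mpt_model model;
    gpt_vocab vocab;
    // n_ctx must be set before loading, as gpttype_load_model does, because the
    // loader clamps it against max_seq_len and sizes the KV cache from it
    model.hparams.n_ctx = 2048;
    if (!mpt_model_load("ggml-mpt-model.bin", model, vocab)) { // illustrative path
        fprintf(stderr, "failed to load MPT model\n");
        return 1;
    }
    std::vector<float> logits;
    size_t mem_per_token = 0;
    // determine the required inference memory per token, as the loader above does
    if (!mpt_eval(model, 4 /*n_threads*/, 0 /*n_past*/, { 0, 1, 2, 3 }, logits, false, mem_per_token)) {
        fprintf(stderr, "mpt_eval failed\n");
        return 1;
    }
    printf("n_vocab = %d, mem_per_token = %zu bytes\n", model.hparams.n_vocab, mem_per_token);
    ggml_free(model.ctx);
    return 0;
}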

View file

@ -407,3 +407,50 @@ struct gpt_neox_model {
};
// no defaults for now
struct mpt_hparams {
int32_t d_model = 0;
int32_t max_seq_len = 0;
int32_t n_heads = 0;
int32_t n_layers = 0;
int32_t n_vocab = 0;
float alibi_bias_max = 0;
float clip_qkv = 0;
int32_t ftype = 0;
int32_t n_ctx = 0;
};
struct mpt_layer {
// pre normalization
struct ggml_tensor * norm_1_weight;
// attention
struct ggml_tensor * c_attn_wqkv_weight;
struct ggml_tensor * c_attn_out_proj_weight;
// post normalization
struct ggml_tensor * norm_2_weight;
// ff
struct ggml_tensor * ffn_up_proj;
struct ggml_tensor * ffn_down_proj;
};
struct mpt_model {
mpt_hparams hparams;
struct ggml_tensor * wte_weight; // token embedding (also used as the tied output head)
struct ggml_tensor * norm_f_weight; // final layer norm
std::vector<mpt_layer> layers;
// key + value memory
struct ggml_tensor * memory_k;
struct ggml_tensor * memory_v;
struct ggml_context * ctx;
std::map<std::string, struct ggml_tensor *> tensors;
};
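As a rough cross-check, the memory_size figure that mpt_model_load prints for memory_k/memory_v follows directly from these hparams. A hypothetical helper like the one below (not part of the commit) reproduces the same arithmetic.
// Hypothetical helper, not in this commit: estimate the combined size of the
// F16 key/value caches exactly as mpt_model_load allocates them.
#include <cstddef>
static size_t mpt_kv_cache_bytes(const mpt_hparams & hp) {
    // each cache holds n_layers * n_ctx * d_model fp16 elements
    const size_t n_elements = (size_t) hp.n_layers * (size_t) hp.n_ctx * (size_t) hp.d_model;
    const size_t bytes_per_element = 2; // GGML_TYPE_F16
    return 2 * n_elements * bytes_per_element; // memory_k + memory_v
}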

View file

@ -0,0 +1,158 @@
import sys
import struct
import json
import numpy as np
from transformers import AutoModelForCausalLM, AutoTokenizer
import sentencepiece.sentencepiece_model_pb2 as model
# ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py
def bytes_to_unicode():
"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
if len(sys.argv) < 3:
print("Usage: convert-h5-to-ggml.py dir-model [use-f32]\n")
print(" ftype == 0 -> float32")
print(" ftype == 1 -> float16")
sys.exit(1)
# output in the same directory as the model
dir_model = sys.argv[1]
fname_out = sys.argv[1] + "/ggml-model.bin"
with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
hparams = json.load(f)
# possible data types
# ftype == 0 -> float32
# ftype == 1 -> float16
#
# map from ftype to string
ftype_str = ["f32", "f16"]
ftype = 1
if len(sys.argv) > 2:
ftype = int(sys.argv[2])
if ftype < 0 or ftype > 1:
print("Invalid ftype: " + str(ftype))
sys.exit(1)
fname_out = sys.argv[1] + "/ggml-model-" + ftype_str[ftype] + ".bin"
tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
dir_model, low_cpu_mem_usage=True, trust_remote_code=True
)
# print (model)
# print(tokenizer.encode('I believe the meaning of life is'))
list_vars = model.state_dict()
for name in list_vars.keys():
print(name, list_vars[name].shape, list_vars[name].dtype)
fout = open(fname_out, "wb")
print(hparams)
fout.write(struct.pack("i", 0x67676D6C)) # magic: ggml in hex
fout.write(struct.pack("i", hparams["d_model"]))
fout.write(struct.pack("i", hparams["max_seq_len"]))
fout.write(struct.pack("i", hparams["n_heads"]))
fout.write(struct.pack("i", hparams["n_layers"]))
fout.write(struct.pack("i", hparams["vocab_size"]))
fout.write(struct.pack("f", hparams["attn_config"]["alibi_bias_max"]))
fout.write(struct.pack("f", hparams["attn_config"]["clip_qkv"] or 0.0))
fout.write(struct.pack("i", ftype))
vocab_size = hparams["vocab_size"]
encoder = tokenizer.vocab
# Add added_tokens (special tokens) to the encoder
encoder.update(tokenizer.get_added_vocab())
byte_encoder = bytes_to_unicode()
byte_decoder = {v:k for k, v in byte_encoder.items()}
counter = 0
# sort by value
for key in sorted(encoder, key=encoder.get):
# workaround for key error when c not found
text=""
for c in key:
if c not in byte_decoder:
text += c
else:
text += chr(byte_decoder[c] )
text = bytearray( text, encoding="utf-8" )
fout.write(struct.pack("i", len(text)))
fout.write(text)
counter += 1
# Repeat last token until vocab_size
while counter < vocab_size:
fout.write(struct.pack("i", len(text)))
fout.write(text)
counter += 1
# assert counter == config.vocab_size
for name in list_vars.keys():
data = list_vars[name].squeeze().numpy()
print("Processing variable: " + name + " with shape: ", data.shape)
n_dims = len(data.shape)
# ftype == 0 -> float32, ftype == 1 -> float16
ftype_cur = 0
if ftype != 0:
if name[-7:] == ".weight" and n_dims == 2:
print(" Converting to float16")
data = data.astype(np.float16)
ftype_cur = 1
else:
print(" Converting to float32")
data = data.astype(np.float32)
ftype_cur = 0
else:
if data.dtype != np.float32:
print(" Converting to float32")
data = data.astype(np.float32)
ftype_cur = 0
# header
str = name.encode("utf-8")
fout.write(struct.pack("iii", n_dims, len(str), ftype_cur))
for i in range(n_dims):
fout.write(struct.pack("i", data.shape[n_dims - 1 - i]))
fout.write(str)
# data
data.tofile(fout)
fout.close()
print("Done. Output file: " + fname_out)
print("")

View file

@ -0,0 +1,184 @@
#include "utils.h"
#include "common-ggml.h"
#include <cassert>
#include <cmath>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <map>
#include <string>
#include <vector>
#include <regex>
struct mpt_hparams {
int32_t d_model = 0;
int32_t max_seq_len = 0;
int32_t n_heads = 0;
int32_t n_layers = 0;
int32_t n_vocab = 0;
float alibi_bias_max = 0;
float clip_qkv = 0;
int32_t ftype = 0;
};
// quantize a model
bool mpt_model_quantize(const std::string & fname_inp,
const std::string & fname_out, ggml_ftype ftype) {
printf("%s: loading model from '%s'\n", __func__, fname_inp.c_str());
auto finp = std::ifstream(fname_inp, std::ios::binary);
if (!finp) {
fprintf(stderr, "%s: failed to open '%s' for reading\n", __func__,
fname_inp.c_str());
return false;
}
auto fout = std::ofstream(fname_out, std::ios::binary);
if (!fout) {
fprintf(stderr, "%s: failed to open '%s' for writing\n", __func__,
fname_out.c_str());
return false;
}
// verify magic
{
uint32_t magic;
finp.read((char *)&magic, sizeof(magic));
if (magic != 0x67676d6c) {
fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n",
__func__, fname_inp.c_str());
return false;
}
fout.write((char *)&magic, sizeof(magic));
}
mpt_hparams hparams;
// load hparams
{
finp.read((char *) &hparams.d_model, sizeof(hparams.d_model));
finp.read((char *) &hparams.max_seq_len, sizeof(hparams.max_seq_len));
finp.read((char *) &hparams.n_heads, sizeof(hparams.n_heads));
finp.read((char *) &hparams.n_layers, sizeof(hparams.n_layers));
finp.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
finp.read((char *) &hparams.alibi_bias_max, sizeof(hparams.alibi_bias_max));
finp.read((char *) &hparams.clip_qkv, sizeof(hparams.clip_qkv));
finp.read((char *) &hparams.ftype, sizeof(hparams.ftype));
const int32_t qntvr_src = hparams.ftype / GGML_QNT_VERSION_FACTOR;
const int32_t ftype_dst = GGML_QNT_VERSION * GGML_QNT_VERSION_FACTOR + ftype;
printf("%s: d_model = %d\n", __func__, hparams.d_model);
printf("%s: max_seq_len = %d\n", __func__, hparams.max_seq_len);
printf("%s: n_heads = %d\n", __func__, hparams.n_heads);
printf("%s: n_layers = %d\n", __func__, hparams.n_layers);
printf("%s: n_vocab = %d\n", __func__, hparams.n_vocab);
printf("%s: alibi_bias_max = %f\n", __func__, hparams.alibi_bias_max);
printf("%s: clip_qkv = %f\n", __func__, hparams.clip_qkv);
printf("%s: ftype (src) = %d\n", __func__, hparams.ftype);
printf("%s: qntvr (src) = %d\n", __func__, qntvr_src);
printf("%s: ftype (dst) = %d\n", __func__, ftype_dst);
printf("%s: qntvr (dst) = %d\n", __func__, GGML_QNT_VERSION);
fout.write((char *) &hparams.d_model, sizeof(hparams.d_model));
fout.write((char *) &hparams.max_seq_len, sizeof(hparams.max_seq_len));
fout.write((char *) &hparams.n_heads, sizeof(hparams.n_heads));
fout.write((char *) &hparams.n_layers, sizeof(hparams.n_layers));
fout.write((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
fout.write((char *) &hparams.alibi_bias_max, sizeof(hparams.alibi_bias_max));
fout.write((char *) &hparams.clip_qkv, sizeof(hparams.clip_qkv));
fout.write((char *) &ftype_dst, sizeof(ftype_dst));
}
// load vocab
{
const int32_t n_vocab = hparams.n_vocab;
std::string word;
for (int i = 0; i < n_vocab; i++) {
uint32_t len;
finp.read((char *)&len, sizeof(len));
fout.write((char *)&len, sizeof(len));
word.resize(len);
finp.read((char *)word.data(), len);
fout.write((char *)word.data(), len);
}
}
printf("%s: quantizing tensors\n", __func__);
// regexes of tensor names to be quantized
const std::vector<std::string> to_quant = {
".*weight",
};
if (!ggml_common_quantize_0(finp, fout, ftype, to_quant, {})) {
fprintf(stderr, "%s: failed to quantize model '%s'\n", __func__,
fname_inp.c_str());
return false;
}
finp.close();
fout.close();
return true;
}
// usage:
// ./mpt-quantize models/mpt/ggml-model.bin
// models/mpt/ggml-model-quant.bin type
//
int main(int argc, char ** argv) {
if (argc != 4) {
fprintf(stderr, "usage: %s model-f32.bin model-quant.bin type\n",
argv[0]);
ggml_print_ftypes(stderr);
return 1;
}
// needed to initialize f16 tables
{
struct ggml_init_params params = {0, NULL, false};
struct ggml_context * ctx = ggml_init(params);
ggml_free(ctx);
}
const std::string fname_inp = argv[1];
const std::string fname_out = argv[2];
const ggml_ftype ftype = ggml_parse_ftype(argv[3]);
const int64_t t_main_start_us = ggml_time_us();
int64_t t_quantize_us = 0;
// load the model
{
const int64_t t_start_us = ggml_time_us();
if (!mpt_model_quantize(fname_inp, fname_out, ggml_ftype(ftype))) {
fprintf(stderr, "%s: failed to quantize model from '%s'\n",
__func__, fname_inp.c_str());
return 1;
}
t_quantize_us = ggml_time_us() - t_start_us;
}
// report timing
{
const int64_t t_main_end_us = ggml_time_us();
printf("\n");
printf("%s: quantize time = %8.2f ms\n", __func__,
t_quantize_us / 1000.0f);
printf("%s: total time = %8.2f ms\n", __func__,
(t_main_end_us - t_main_start_us) / 1000.0f);
}
return 0;
}

View file

@ -1,7 +1,12 @@
#include "utils.h" #include "utils.h"
#include <cmath>
#include <cstring>
#include <fstream>
#include <regex>
#include <locale>
#include <codecvt>
#include <sstream>
@ -109,24 +114,16 @@ void gpt_vocab::add_special_token(const std::string & token) {
special_tokens.push_back(token);
}
std::string convert_to_utf8(const std::wstring & input) {
std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
return converter.to_bytes(input);
}
std::wstring convert_to_wstring(const std::string & input) {
std::wstring_convert<std::codecvt_utf8<wchar_t>> converter;
return converter.from_bytes(input);
}
std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::string & text) {
@ -162,39 +159,26 @@ std::vector<gpt_vocab::id> gpt_tokenize(const gpt_vocab & vocab, const std::stri
}
}
// find the longest token that forms each word in words:
std::vector<gpt_vocab::id> tokens;
for (const auto & word : words) {
for (int i = 0; i < word.size(); ){
for (int j = word.size() - 1; j >= i; j--){
auto cand = word.substr(i, j-i+1);
auto it = vocab.token_to_id.find(cand);
if (it != vocab.token_to_id.end()){ // word.substr(i, j-i+1) in vocab
tokens.push_back(it->second);
i = j + 1;
break;
}
else if (j == i){ // word.substr(i, 1) has no matching
fprintf(stderr, "%s: unknown token '%s'\n", __func__, word.substr(i, 1).data());
i++;
}
}
}
}
return tokens;
}
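To make the new greedy longest-match behaviour concrete, a small hypothetical example (not part of this commit): with a toy vocabulary, gpt_tokenize now takes the longest vocabulary entry it can find at each position of a word before moving on.
// Hypothetical illustration, not part of this commit: the rewritten loop picks
// the longest vocab entry starting at each position, so "lowest" -> "low" + "est".
#include <cstdio>
#include "utils.h"
int main() {
    gpt_vocab vocab;
    vocab.token_to_id = {{"low", 0}, {"est", 1}, {"l", 2}, {"o", 3}, {"w", 4}, {"e", 5}, {"s", 6}, {"t", 7}};
    for (const auto & kv : vocab.token_to_id) vocab.id_to_token[kv.second] = kv.first;
    for (auto id : gpt_tokenize(vocab, "lowest")) printf("%d ", id); // expected output: 0 1
    printf("\n");
    return 0;
}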