fixed auto rope scaling (+1 squashed commits)
Squashed commits: [b1767874] wip

parent 0bf75b05dc
commit 7fb809b94b

5 changed files with 42 additions and 28 deletions
expose.cpp | 33
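The change, in brief: check_file_format now fills a new FileFormatExtraMeta struct with the model's trained context length read from GGUF metadata, gpttype_load_model accepts it as a third parameter, and the NTK-aware rope scaling divides the requested context by n_ctx_train/2048 instead of assuming every model was trained at 2048.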
@@ -27,6 +27,7 @@ extern "C"
     //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
     static FileFormat file_format = FileFormat::BADFORMAT;
+    static FileFormatExtraMeta file_format_meta;
 
     bool load_model(const load_model_inputs inputs)
     {
@@ -36,11 +37,9 @@ extern "C"
 
         int forceversion = inputs.forceversion;
 
-        if(forceversion==0)
-        {
-            file_format = check_file_format(model.c_str());
-        }
-        else
+        file_format = check_file_format(model.c_str(),&file_format_meta);
+        if(forceversion!=0)
         {
             printf("\nWARNING: FILE FORMAT FORCED TO VER %d\nIf incorrect, loading may fail or crash.\n",forceversion);
             file_format = (FileFormat)forceversion;
@@ -64,7 +63,7 @@ extern "C"
         if(file_format==FileFormat::GPTJ_1 || file_format==FileFormat::GPTJ_2 || file_format==FileFormat::GPTJ_3 || file_format==FileFormat::GPTJ_4 || file_format==FileFormat::GPTJ_5)
         {
             printf("\n---\nIdentified as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-            ModelLoadResult lr = gpttype_load_model(inputs, file_format);
+            ModelLoadResult lr = gpttype_load_model(inputs, file_format, file_format_meta);
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 if(file_format==FileFormat::GPTJ_1)
@@ -73,14 +72,14 @@ extern "C"
                     //otherwise if we tried 3 first, then try 2
                     file_format = FileFormat::GPTJ_4;
                     printf("\n---\nRetrying as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }
 
                 if (lr == ModelLoadResult::RETRY_LOAD)
                 {
                     file_format = FileFormat::GPTJ_3;
                     printf("\n---\nRetrying as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }
 
                 //lastly try format 2
@@ -88,7 +87,7 @@ extern "C"
                 {
                     file_format = FileFormat::GPTJ_2;
                     printf("\n---\nRetrying as GPT-J model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }
             }
 
@@ -104,18 +103,18 @@ extern "C"
         else if(file_format==FileFormat::GPT2_1||file_format==FileFormat::GPT2_2||file_format==FileFormat::GPT2_3||file_format==FileFormat::GPT2_4)
         {
             printf("\n---\nIdentified as GPT-2 model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-            ModelLoadResult lr = gpttype_load_model(inputs, file_format);
+            ModelLoadResult lr = gpttype_load_model(inputs, file_format, file_format_meta);
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 file_format = FileFormat::GPT2_3;
                 printf("\n---\nRetrying as GPT-2 model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                lr = gpttype_load_model(inputs, file_format);
+                lr = gpttype_load_model(inputs, file_format, file_format_meta);
             }
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 file_format = FileFormat::GPT2_2;
                 printf("\n---\nRetrying as GPT-2 model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                lr = gpttype_load_model(inputs, file_format);
+                lr = gpttype_load_model(inputs, file_format, file_format_meta);
             }
             if (lr == ModelLoadResult::FAIL || lr == ModelLoadResult::RETRY_LOAD)
             {
@@ -129,27 +128,27 @@ extern "C"
         else if(file_format==FileFormat::NEOX_1 || file_format==FileFormat::NEOX_2 || file_format==FileFormat::NEOX_3 || file_format==FileFormat::NEOX_4 || file_format==FileFormat::NEOX_5 || file_format==FileFormat::NEOX_6 || file_format==FileFormat::NEOX_7)
         {
             printf("\n---\nIdentified as GPT-NEO-X model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-            ModelLoadResult lr = gpttype_load_model(inputs, file_format);
+            ModelLoadResult lr = gpttype_load_model(inputs, file_format, file_format_meta);
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 if(file_format==FileFormat::NEOX_2)
                 {
                     file_format = FileFormat::NEOX_3;
                     printf("\n---\nRetrying as GPT-NEO-X model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }
                 else
                 {
                     file_format = FileFormat::NEOX_5;
                     printf("\n---\nRetrying as GPT-NEO-X model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                    lr = gpttype_load_model(inputs, file_format);
+                    lr = gpttype_load_model(inputs, file_format, file_format_meta);
                 }
             }
             if (lr == ModelLoadResult::RETRY_LOAD)
             {
                 file_format = FileFormat::NEOX_1;
                 printf("\n---\nRetrying as GPT-NEO-X model: (ver %d)\nAttempting to Load...\n---\n", file_format);
-                lr = gpttype_load_model(inputs, file_format);
+                lr = gpttype_load_model(inputs, file_format, file_format_meta);
             }
             if (lr == ModelLoadResult::FAIL || lr == ModelLoadResult::RETRY_LOAD)
             {
@@ -178,7 +177,7 @@ extern "C"
             {
                 printf("\n---\nIdentified as LLAMA model: (ver %d)\nAttempting to Load...\n---\n", file_format);
             }
-            ModelLoadResult lr = gpttype_load_model(inputs, file_format);
+            ModelLoadResult lr = gpttype_load_model(inputs, file_format, file_format_meta);
             if (lr == ModelLoadResult::FAIL || lr == ModelLoadResult::RETRY_LOAD)
             {
                 return false;
@@ -393,7 +393,7 @@ static std::string RemoveBell(const std::string & input) //removes the bell char
     return word2;
 }
 
-ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format)
+ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format, FileFormatExtraMeta file_format_meta)
 {
     ggml_time_init();
 
@@ -438,11 +438,11 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format)
     {
         //approximate NTK aware ctx
         auto effectivenctx = params.n_ctx;
-        // if((file_format == FileFormat::GGUF_LLAMA || file_format==FileFormat::GGUF_FALCON) && llama_ctx_v4->model.hparams.n_ctx_train>2048)
-        // {
-        //     float factor = llama_ctx_v4->model.hparams.n_ctx_train/2048;
-        //     effectivenctx = effectivenctx/factor;
-        // }
+        if((file_format == FileFormat::GGUF_LLAMA || file_format==FileFormat::GGUF_FALCON) && file_format_meta.n_ctx_train > 2048)
+        {
+            float factor = file_format_meta.n_ctx_train/2048;
+            effectivenctx = effectivenctx/factor;
+        }
         rope_freq_base = (effectivenctx <= 3072 ? 26000.0f : (effectivenctx <= 4096 ? 32000.0f : (effectivenctx <= 6144 ? 54000.0f : (effectivenctx <= 8192 ? 82684.0f : (effectivenctx <= 12288 ? 140000.0f : 200000.0f)))));
 
     }
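For reference, the auto rope scaling above reduces to a small standalone calculation. The sketch below mirrors the hunk; the helper name and main() harness are mine, the breakpoint table is copied verbatim, and the diff additionally guards the division so it only applies to GGUF_LLAMA/GGUF_FALCON models:

// minimal sketch of the auto rope scaling in this commit (illustrative names)
#include <cstdio>

static float auto_rope_freq_base(int n_ctx, int n_ctx_train)
{
    float effectivenctx = (float)n_ctx;
    if (n_ctx_train > 2048)
    {
        // a model trained on a longer context needs proportionally less NTK scaling;
        // the diff computes this with integer operands (n_ctx_train/2048), so it truncates
        float factor = (float)(n_ctx_train / 2048);
        effectivenctx = effectivenctx / factor;
    }
    // breakpoint table copied from the hunk above
    return (effectivenctx <= 3072 ? 26000.0f : (effectivenctx <= 4096 ? 32000.0f : (effectivenctx <= 6144 ? 54000.0f : (effectivenctx <= 8192 ? 82684.0f : (effectivenctx <= 12288 ? 140000.0f : 200000.0f)))));
}

int main()
{
    // requesting 8192 ctx on a model trained at 4096 now scales like a 4096 request
    printf("%.0f\n", auto_rope_freq_base(8192, 4096)); // prints 32000
    return 0;
}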
@@ -330,7 +330,7 @@ maxhordectx = 1024
 maxhordelen = 256
 modelbusy = threading.Lock()
 defaultport = 5001
-KcppVersion = "1.42.1"
+KcppVersion = "1.43"
 showdebug = True
 showsamplerwarning = True
 showmaxctxwarning = True
@@ -80,7 +80,7 @@ void print_tok_vec(std::vector<float> &embd)
 }
 
 //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
-FileFormat check_file_format(const std::string & fname)
+FileFormat check_file_format(const std::string & fname, FileFormatExtraMeta * fileformatmeta)
 {
     std::vector<char> f_buf(1024*1024);
 
@@ -266,7 +266,7 @@ void print_tok_vec(std::vector<float> &embd)
         auto keyidx = gguf_find_key(ctx, "general.architecture");
         std::string modelarch = "";
         if (keyidx != -1) { modelarch = gguf_get_val_str(ctx, keyidx); }
-        gguf_free(ctx);
+
         if(modelarch=="llama")
         {
             fileformat = FileFormat::GGUF_LLAMA;
@@ -280,6 +280,16 @@ void print_tok_vec(std::vector<float> &embd)
         {
             printf("\nERROR: Detected unimplemented GGUF Arch: %s\n",modelarch.c_str());
         }
 
+        if(modelarch!="" && fileformatmeta!=nullptr)
+        {
+            std::string fkey = modelarch+".context_length";
+            auto keyidx = gguf_find_key(ctx, fkey.c_str());
+            if (keyidx != -1) {
+                fileformatmeta->n_ctx_train = gguf_get_val_u32(ctx, keyidx);
+            }
+        }
+        gguf_free(ctx);
     }
 
     if(fin.is_open())
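As a rough, self-contained illustration of what the new block does (and why gguf_free(ctx) moved below it in the hunk above, after the earlier hunk deleted the premature free), here is a hedged sketch against the ggml GGUF C API of this era; the helper name is hypothetical and the gguf_init_params layout is an assumption:

#include <string>
#include "ggml.h" // assumption: the gguf_* API is declared here in this tree

// hypothetical helper: read "<arch>.context_length" from a GGUF file,
// falling back to the 2048 default that FileFormatExtraMeta carries
static int read_n_ctx_train(const std::string & fname)
{
    gguf_init_params params;
    params.no_alloc = true; // metadata only, don't allocate tensor data
    params.ctx = nullptr;
    gguf_context * ctx = gguf_init_from_file(fname.c_str(), params);
    if (ctx == nullptr) { return 2048; }

    int n_ctx_train = 2048;
    auto archidx = gguf_find_key(ctx, "general.architecture");
    if (archidx != -1)
    {
        std::string fkey = std::string(gguf_get_val_str(ctx, archidx)) + ".context_length";
        auto keyidx = gguf_find_key(ctx, fkey.c_str());
        if (keyidx != -1) { n_ctx_train = gguf_get_val_u32(ctx, keyidx); }
    }
    gguf_free(ctx); // freed only after every key is read, as in the fix above
    return n_ctx_train;
}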
@@ -51,6 +51,11 @@ enum FileFormat
 
 };
 
+struct FileFormatExtraMeta
+{
+    int n_ctx_train = 2048;
+};
+
 enum ModelLoadResult
 {
     FAIL = 0,
@@ -58,7 +63,7 @@ enum ModelLoadResult
     RETRY_LOAD = 2, //used if it's suspected that the model is an older format
 };
 
-ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format);
+ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in_file_format, FileFormatExtraMeta file_format_meta);
 generation_outputs gpttype_generate(const generation_inputs inputs, generation_outputs &output);
 bool gpttype_generate_abort();
 const std::string & gpttype_get_pending_output();
@@ -73,7 +78,7 @@ std::vector<int> LongestCommonSubseq(const std::vector<int> x, const std::vector
 bool ArrStartWith(const std::vector<int> targetArray, const std::vector<int> searchSeq);
 int ArrFindIndexOf(const std::vector<int> targetArray, const std::vector<int> searchSeq);
 
-FileFormat check_file_format(const std::string & fname);
+FileFormat check_file_format(const std::string & fname, FileFormatExtraMeta * fileformatmeta);
 void ContextFastForward(std::vector<int> &current_context_tokens, std::vector<int> &embd_inp,
     int &n_past, std::vector<int> &last_n_tokens, const int nctx, std::vector<int> &smartcontext,
     const bool useSmartContext, const bool requireFullSubset);