still refactoring, but need a checkpoint to prepare build for 1.0.7
parent 6b86f5ea22
commit 801b178f2a
11 changed files with 452 additions and 44 deletions
Makefile (12 changed lines)

@@ -211,7 +211,7 @@ endif
 BLAS_BUILD =
 ifeq ($(OS),Windows_NT)
-	BLAS_BUILD = $(CXX) $(CXXFLAGS) ggml_blas.o common.o extra.o expose.o llama_adapter.o libopenblas.lib -shared -o llamacpp_blas.dll $(LDFLAGS)
+	BLAS_BUILD = $(CXX) $(CXXFLAGS) ggml_blas.o common.o extra.o expose.o model_adapter.o libopenblas.lib -shared -o llamacpp_blas.dll $(LDFLAGS)
 else
 	BLAS_BUILD = @echo 'Your OS is $(OS) and does not appear to be Windows. If you want to use openblas, please link it manually with LLAMA_OPENBLAS=1'
 endif
@@ -258,8 +258,8 @@ extra.o: extra.cpp extra.h
 expose.o: expose.cpp expose.h
 	$(CXX) $(CXXFLAGS) -c expose.cpp -o expose.o
 
-llama_adapter.o: llama_adapter.cpp llama_adapter.h
-	$(CXX) $(CXXFLAGS) -c llama_adapter.cpp -o llama_adapter.o
+model_adapter.o:
+	$(CXX) $(CXXFLAGS) -c llama_adapter.cpp -o model_adapter.o
 
 clean:
 	rm -vf *.o main quantize perplexity embedding main.exe quantize.exe llamacpp.dll llamacpp_blas.dll gpt2.exe gptj.exe
@@ -274,10 +274,10 @@ gptj: ggml_old_v1.o
 	$(CXX) $(CXXFLAGS) otherarch/gptj_old.cpp otherarch/utils.cpp ggml_old_v1.o -o gptj $(LDFLAGS)
 
 
-llamalib: ggml.o common.o extra.o expose.o llama_adapter.o
-	$(CXX) $(CXXFLAGS) ggml.o common.o extra.o expose.o llama_adapter.o -shared -o llamacpp.dll $(LDFLAGS)
+llamalib: ggml.o common.o extra.o expose.o model_adapter.o
+	$(CXX) $(CXXFLAGS) ggml.o common.o extra.o expose.o model_adapter.o -shared -o llamacpp.dll $(LDFLAGS)
 
-llamalib_blas: ggml_blas.o common.o extra.o expose.o llama_adapter.o
+llamalib_blas: ggml_blas.o common.o extra.o expose.o model_adapter.o
 	$(BLAS_BUILD)
 
 quantize: examples/quantize/quantize.cpp ggml.o llama.o
@@ -2,7 +2,7 @@
 A self contained distributable from Concedo that exposes llama.cpp function bindings, allowing it to be used via a simulated Kobold API endpoint.
 
-What does it mean? You get llama.cpp with a fancy UI, persistent stories, editing tools, save formats, memory, world info, author's note, characters, scenarios and everything Kobold and Kobold Lite have to offer. In a tiny package under 1 MB in size, excluding model weights.
+What does it mean? You get llama.cpp with a fancy UI, persistent stories, editing tools, save formats, memory, world info, author's note, characters, scenarios and everything Kobold and Kobold Lite have to offer. In a tiny package around 10 MB in size, excluding model weights.
 
 
 
@@ -7,7 +7,7 @@
 //No dynamic memory allocation! Setup structs with FIXED (known) shapes and sizes for ALL output fields
 //Python will ALWAYS provide the memory, we just write to it.
 
-#include "llama_adapter.h"
+#include "model_adapter.h"
 #include "expose.h"
 #include "extra.h"
 
extra.cpp (29 changed lines)

@@ -18,6 +18,35 @@
 #include <alloca.h>
 #endif
 
+static clock_t bench_timer = 0;
+
+void timer_start()
+{
+    bench_timer = clock();
+}
+double timer_check()
+{
+    double ticks = clock() - bench_timer;
+    double time_taken = ((double)ticks) / CLOCKS_PER_SEC;
+    return time_taken;
+}
+
+void print_tok_vec(std::vector<int> &embd)
+{
+    std::cout << "[";
+    bool first = true;
+    for (auto i : embd)
+    {
+        if (!first)
+        {
+            std::cout << ',';
+        }
+        first = false;
+        std::cout << i;
+    }
+    std::cout << "]";
+}
+
 //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
 FileFormat check_file_format(const std::string & fname)
 {
extra.h (4 changed lines)

@@ -24,6 +24,10 @@ enum FileFormat
     GGJT=3
 };
 
+void print_tok_vec(std::vector<int> &embd);
+void timer_start();
+double timer_check();
+
 FileFormat check_file_format(const std::string & fname);
 
 std::vector<llama_token> legacy_llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos);
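For reference, the adapter code later in this commit uses these helpers as a simple stopwatch: timer_start() before prompt processing, timer_check() to read elapsed seconds, then the same pair again around generation. Below is a minimal standalone sketch of that pattern; the workload and the printed message format are placeholders for illustration only, not code from this commit.

// Minimal sketch of the clock()-based stopwatch pattern used by the adapters.
#include <time.h>
#include <stdio.h>
#include <vector>

static clock_t bench_timer = 0;

void timer_start() { bench_timer = clock(); }

double timer_check()
{
    double ticks = clock() - bench_timer;      // clock ticks elapsed since timer_start()
    return ((double)ticks) / CLOCKS_PER_SEC;   // convert to seconds
}

int main()
{
    timer_start();
    std::vector<double> work(1 << 22, 1.0);    // placeholder "prompt processing" workload
    double sum = 0;
    for (double v : work) sum += v;
    double time1 = timer_check();              // processing time, as in llama_generate()

    timer_start();
    for (double v : work) sum -= v;            // placeholder "generation" workload
    double time2 = timer_check();

    printf("\nTime Taken - Processing:%.1fs, Generation:%.1fs, Total:%.1fs (sum=%f)\n",
           time1, time2, time1 + time2, sum);
    return 0;
}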
gptj_adapter.cpp (new file, 403 lines)

@@ -0,0 +1,403 @@
//This is Concedo's shitty adapter for adding python bindings for llama

//Considerations:
//Don't want to use pybind11 due to dependencies on MSVCC
//ZERO or MINIMAL changes as possible to main.cpp - do not move their function declarations here!
//Leave main.cpp UNTOUCHED, We want to be able to update the repo and pull any changes automatically.
//No dynamic memory allocation! Setup structs with FIXED (known) shapes and sizes for ALL output fields
//Python will ALWAYS provide the memory, we just write to it.

#include <time.h>
#include "model_adapter.h"
#include "otherarch/otherarch.h"

//return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
static FileFormat file_format = FileFormat::FAIL;
static llama_context_params ctx_params;
static gpt_params params;
static int n_past = 0;
static int n_threads = 4;
static int n_batch = 8;
static std::string model;
static llama_context *ctx;
static std::vector<llama_token> last_n_tokens;
static std::vector<llama_token> current_context_tokens;

void nnn()
{
    ggml_time_init();
    const int64_t t_main_start_us = ggml_time_us();

    gpt_params params;
    params.model = "models/gpt-j-6B/ggml-model.bin";

    if (params.seed < 0) {
        params.seed = time(NULL);
    }

    printf("%s: seed = %d\n", __func__, params.seed);

    std::mt19937 rng(params.seed);

    int64_t t_load_us = 0;

    gpt_vocab vocab;
    gptj_model model;

    // load the model
    {
        const int64_t t_start_us = ggml_time_us();

        if (!legacy_gptj_model_load(params.model, model, vocab)) {
            fprintf(stderr, "%s: failed to load model from '%s'\n", __func__, params.model.c_str());
            return 1;
        }

        t_load_us = ggml_time_us() - t_start_us;
    }

    int n_past = 0;

    int64_t t_sample_us = 0;
    int64_t t_predict_us = 0;

    std::vector<float> logits;

    // tokenize the prompt
    std::vector<gpt_vocab::id> embd_inp = ::gpt_tokenize(vocab, params.prompt);

    params.n_predict = std::min(params.n_predict, model.hparams.n_ctx - (int) embd_inp.size());

    printf("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
    printf("\n");

    std::vector<gpt_vocab::id> embd;

    // determine the required inference memory per token:
    size_t mem_per_token = 0;
    legacy_gptj_eval(model, params.n_threads, 0, { 0, 1, 2, 3 }, logits, mem_per_token);

    for (int i = embd.size(); i < embd_inp.size() + params.n_predict; i++) {
        // predict
        if (embd.size() > 0) {
            const int64_t t_start_us = ggml_time_us();

            if (!legacy_gptj_eval(model, params.n_threads, n_past, embd, logits, mem_per_token)) {
                printf("Failed to predict\n");
                return 1;
            }

            t_predict_us += ggml_time_us() - t_start_us;
        }

        n_past += embd.size();
        embd.clear();

        if (i >= embd_inp.size()) {
            // sample next token
            const int top_k = params.top_k;
            const float top_p = params.top_p;
            const float temp = params.temp;

            const int n_vocab = model.hparams.n_vocab;

            gpt_vocab::id id = 0;

            {
                const int64_t t_start_sample_us = ggml_time_us();

                id = gpt_sample_top_k_top_p(vocab, logits.data() + (logits.size() - n_vocab), top_k, top_p, temp, rng);

                t_sample_us += ggml_time_us() - t_start_sample_us;
            }

            // add it to the context
            embd.push_back(id);
        } else {
            // if here, it means we are still processing the input prompt
            for (int k = i; k < embd_inp.size(); k++) {
                embd.push_back(embd_inp[k]);
                if (embd.size() > params.n_batch) {
                    break;
                }
            }
            i += embd.size() - 1;
        }

        // display text
        for (auto id : embd) {
            printf("%s", vocab.id_to_token[id].c_str());
        }
        fflush(stdout);

        // end of text token
        if (embd.back() == 50256) {
            break;
        }
    }

    // report timing
    {
        const int64_t t_main_end_us = ggml_time_us();

        printf("\n\n");
        printf("%s: mem per token = %8zu bytes\n", __func__, mem_per_token);
        printf("%s: load time = %8.2f ms\n", __func__, t_load_us/1000.0f);
        printf("%s: sample time = %8.2f ms\n", __func__, t_sample_us/1000.0f);
        printf("%s: predict time = %8.2f ms / %.2f ms per token\n", __func__, t_predict_us/1000.0f, t_predict_us/1000.0f/n_past);
        printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0f);
    }

    ggml_free(model.ctx);

    return 0;
}

bool llama_load_model(const load_model_inputs inputs, FileFormat in_file_format)
{
    printf("System Info: %s\n", llama_print_system_info());

    ctx_params = llama_context_default_params();

    n_threads = inputs.threads;
    n_batch = inputs.batch_size;
    model = inputs.model_filename;

    ctx_params.n_ctx = inputs.max_context_length;
    ctx_params.n_parts = inputs.n_parts_overwrite;
    ctx_params.seed = -1;
    ctx_params.f16_kv = inputs.f16_kv;
    ctx_params.logits_all = false;

    file_format = in_file_format;

    if (file_format == FileFormat::GGML || file_format == FileFormat::GGHF)
    {
        ctx = legacy_llama_init_from_file(model.c_str(), ctx_params);
    }
    else
    {
        ctx = llama_init_from_file(model.c_str(), ctx_params);
    }

    if (ctx == NULL)
    {
        fprintf(stderr, "%s: error: failed to load model '%s'\n", __func__, model.c_str());
        return false;
    }

    if (file_format < FileFormat::GGJT)
    {
        printf("\n---\nWarning: Your model has an INVALID or OUTDATED format (ver %d). Please reconvert it for better results!\n---\n", file_format);
    }

    //determine mem per token
    const std::vector<llama_token> tmp = {0, 1, 2, 3};
    llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);

    return true;
}

generation_outputs llama_generate(const generation_inputs inputs, generation_outputs &output)
{
    params.prompt = inputs.prompt;
    params.seed = inputs.seed;
    params.n_predict = inputs.max_length;
    params.top_k = inputs.top_k;
    params.top_p = inputs.top_p;
    params.temp = inputs.temperature;
    params.repeat_last_n = inputs.rep_pen_range;
    params.repeat_penalty = inputs.rep_pen;
    params.n_ctx = inputs.max_context_length;
    params.n_batch = n_batch;
    params.n_threads = n_threads;

    if (params.repeat_last_n < 1)
    {
        params.repeat_last_n = 1;
    }
    if (params.top_k < 1)
    {
        params.top_k = 300; //to disable top_k we actually need to increase this value to a very high number
    }
    if (params.seed <= 0)
    {
        params.seed = time(NULL);
    }

    params.prompt.insert(0, 1, ' ');

    // tokenize the prompt
    std::vector<llama_token> embd_inp;
    if (file_format == 1)
    {
        embd_inp = ::legacy_llama_tokenize(ctx, params.prompt, true);
    }
    else
    {
        embd_inp = ::llama_tokenize(ctx, params.prompt, true);
    }

    //params.n_predict = std::min(params.n_predict, params.n_ctx - (int) embd_inp.size());
    //truncate to front of the prompt if its too long
    if (embd_inp.size() + params.n_predict > params.n_ctx)
    {
        int offset = embd_inp.size() - params.n_ctx + params.n_predict;
        embd_inp = std::vector<llama_token>(embd_inp.begin() + offset, embd_inp.end());
    }

    //determine how much npast we have to rewind from the current state

    std::vector<llama_token> embd;

    int last_n_size = params.repeat_last_n;
    last_n_tokens.resize(last_n_size);

    //display usage
    // std::string tst = " ";
    // char * tst2 = (char*)tst.c_str();
    // gpt_print_usage(1,&tst2,params);

    std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
    n_past = 0;

    //fast forward the past based on identical tokens, stop once a divergence is noted
    int embd_inp_len = embd_inp.size();
    for (int i = 0; i < current_context_tokens.size(); ++i)
    {
        if (current_context_tokens[i] == embd_inp[i])
        {
            n_past += 1;
            last_n_tokens.push_back(current_context_tokens[i]);
        }
        else
        {
            break;
        }
        if ((i + 2) >= embd_inp_len)
        {
            break;
        }
    }

    last_n_tokens.erase(last_n_tokens.begin(), last_n_tokens.begin() + n_past);
    embd_inp.erase(embd_inp.begin(), embd_inp.begin() + n_past);

    //if using BLAS and prompt is big enough, switch to single thread and use a huge batch
    bool blasmode = (embd_inp.size() >= 32 && ggml_cpu_has_blas());
    int original_batch = params.n_batch;
    int original_threads = params.n_threads;
    if (blasmode)
    {
        params.n_batch = 512;
        params.n_threads = 1;
    }

    current_context_tokens.resize(n_past);

    int remaining_tokens = params.n_predict;
    int input_consumed = 0;
    std::mt19937 rng(params.seed);
    std::string concat_output = "";

    bool startedsampling = false;

    timer_start();
    double time1 = 0, time2 = 0;
    unsigned int embd_inp_size = embd_inp.size();
    printf("\n");

    while (remaining_tokens > 0)
    {
        llama_token id = 0;
        // predict
        unsigned int embdsize = embd.size();
        if (embdsize > 0)
        {
            //print progress
            if (!startedsampling)
            {
                printf("\rProcessing Prompt%s (%d / %d tokens)", (blasmode ? " [BLAS]" : ""), input_consumed, embd_inp_size);
            }
            else
            {
                printf("\rGenerating (%d / %d tokens)", (1 + params.n_predict - remaining_tokens), params.n_predict);
            }
            //printf("\nnp:%d embd:%d txt:%s",n_past,embd.size(),llama_token_to_str(ctx, embd[0]));
            if (llama_eval(ctx, embd.data(), embdsize, n_past, params.n_threads))
            {
                fprintf(stderr, "Failed to predict\n");
                snprintf(output.text, sizeof(output.text), "%s", "");
                output.status = 0;
                return output;
            }
        }

        n_past += embd.size();
        embd.clear();
        if ((int)embd_inp.size() <= input_consumed)
        {
            // out of user input, sample next token
            const float top_k = params.top_k;
            const float top_p = params.top_p;
            const float temp = params.temp;
            const float repeat_penalty = params.repeat_penalty;

            if (!startedsampling)
            {
                startedsampling = true;
                params.n_batch = original_batch;
                params.n_threads = original_threads;
                time1 = timer_check();
                timer_start();
                printf("\n");
            }

            {
                auto logits = llama_get_logits(ctx);
                // set the logit of the eos token (2) to zero to avoid sampling it
                logits[llama_token_eos()] = 0;
                //set logits of opening square bracket to zero.
                logits[518] = 0;
                logits[29961] = 0;

                id = llama_sample_top_p_top_k(ctx, last_n_tokens.data(), last_n_tokens.size(), top_k, top_p, temp, repeat_penalty);

                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(id);
                current_context_tokens.push_back(id);
            }

            // add it to the context
            embd.push_back(id);

            // decrement remaining sampling budget
            --remaining_tokens;
            //printf("\nid:%d word:%s\n",id,llama_token_to_str(ctx, id));
            concat_output += llama_token_to_str(ctx, id);
        }
        else
        {
            // some user input remains from prompt or interaction, forward it to processing
            while ((int)embd_inp.size() > input_consumed)
            {
                embd.push_back(embd_inp[input_consumed]);
                last_n_tokens.erase(last_n_tokens.begin());
                last_n_tokens.push_back(embd_inp[input_consumed]);
                current_context_tokens.push_back(embd_inp[input_consumed]);
                ++input_consumed;
                if ((int)embd.size() >= params.n_batch)
                {
                    break;
                }
            }
        }
    }
    time2 = timer_check();
    printf("\nTime Taken - Processing:%.1fs, Generation:%.1fs, Total:%.1fs", time1, time2, (time1 + time2));

    output.status = 1;
    snprintf(output.text, sizeof(output.text), "%s", concat_output.c_str());
    return output;
}
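One detail worth calling out in the listing above: before evaluating anything, llama_generate() fast-forwards n_past by counting how many leading tokens of the new prompt match the tokens kept in current_context_tokens from the previous call, so an unchanged prefix is not re-evaluated. A distilled, standalone sketch of just that prefix-matching step follows; the helper name and sample values are illustrative and not part of this commit.

#include <cstdio>
#include <vector>

// Count how many leading tokens two token sequences share, stopping early so at
// least the last token of the new prompt is always re-evaluated (this mirrors
// the "(i + 2) >= embd_inp_len" guard in llama_generate()).
static int count_reusable_past(const std::vector<int> &previous_context,
                               const std::vector<int> &new_prompt)
{
    int n_past = 0;
    int new_len = (int)new_prompt.size();
    for (int i = 0; i < (int)previous_context.size(); ++i)
    {
        if (i >= new_len || previous_context[i] != new_prompt[i])
        {
            break;
        }
        n_past += 1;
        if ((i + 2) >= new_len)
        {
            break;
        }
    }
    return n_past;
}

int main()
{
    std::vector<int> prev = {1, 50, 60, 70, 80};
    std::vector<int> next = {1, 50, 60, 99, 100, 101};
    // The first three tokens match, so only the tail of the new prompt needs evaluation.
    printf("reusable n_past = %d\n", count_reusable_past(prev, next)); // prints 3
    return 0;
}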
@@ -9,12 +9,11 @@
 #include <time.h>
 #include "./examples/main/main.cpp"
 #include "extra.h"
 #include "ggml.h"
-#include "llama_adapter.h"
+#include "model_adapter.h"
 
 //return val: 0=fail, 1=(original ggml, alpaca), 2=(ggmf), 3=(ggjt)
-static int file_format = 0;
+static FileFormat file_format = FileFormat::FAIL;
 static llama_context_params ctx_params;
 static gpt_params params;
 static int n_past = 0;
@@ -25,36 +24,7 @@ static llama_context *ctx;
 static std::vector<llama_token> last_n_tokens;
 static std::vector<llama_token> current_context_tokens;
 
-static clock_t bench_timer = 0;
-
-void timer_start()
-{
-    bench_timer = clock();
-}
-double timer_check()
-{
-    double ticks = clock() - bench_timer;
-    double time_taken = ((double)ticks) / CLOCKS_PER_SEC;
-    return time_taken;
-}
-
-void print_tok_vec(std::vector<llama_token> &embd)
-{
-    std::cout << "[";
-    bool first = true;
-    for (auto i : embd)
-    {
-        if (!first)
-        {
-            std::cout << ',';
-        }
-        first = false;
-        std::cout << i;
-    }
-    std::cout << "]";
-}
-
-bool llama_load_model(const load_model_inputs inputs, FileFormat file_format)
+bool llama_load_model(const load_model_inputs inputs, FileFormat in_file_format)
 {
     printf("System Info: %s\n", llama_print_system_info());
 
@@ -70,6 +40,8 @@ bool llama_load_model(const load_model_inputs inputs, FileFormat in_file_format)
     ctx_params.f16_kv = inputs.f16_kv;
     ctx_params.logits_all = false;
 
+    file_format = in_file_format;
+
     if (file_format == FileFormat::GGML || file_format == FileFormat::GGHF)
     {
         ctx = legacy_llama_init_from_file(model.c_str(), ctx_params);
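The net effect of these hunks is that the adapter now carries the detected file format as a typed enum rather than a bare int, stores it once at load time, and branches on it when choosing a loader. A condensed sketch of that dispatch, pieced together from the gptj_adapter.cpp listing above and the FileFormat declarations in extra.h, is shown below; the numeric enum values are inferred from the "//return val" comment, and the loader stubs and names are placeholders, not code from this commit.

#include <cstdio>
#include <string>

// FileFormat as declared in extra.h; values assumed from the "//return val" comment.
enum FileFormat { FAIL = 0, GGML = 1, GGHF = 2, GGJT = 3 };

// Placeholder loaders standing in for legacy_llama_init_from_file / llama_init_from_file.
static bool load_legacy(const std::string &path)  { printf("legacy loader: %s\n", path.c_str()); return true; }
static bool load_current(const std::string &path) { printf("current loader: %s\n", path.c_str()); return true; }

static FileFormat file_format = FileFormat::FAIL;

bool load_model(const std::string &path, FileFormat in_file_format)
{
    file_format = in_file_format;              // stored once, reused later during generation

    bool ok = (file_format == FileFormat::GGML || file_format == FileFormat::GGHF)
                  ? load_legacy(path)          // pre-ggjt files go through the legacy loader
                  : load_current(path);        // ggjt and newer use the upstream loader

    if (ok && file_format < FileFormat::GGJT)  // warn about outdated formats, as the adapter does
    {
        printf("Warning: model format (ver %d) is outdated, consider reconverting.\n", file_format);
    }
    return ok;
}

int main()
{
    load_model("model.bin", FileFormat::GGML);
    return 0;
}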
llamacpp.dll (binary file not shown)
@@ -334,9 +334,9 @@ def main(args):
     print(f"Starting Kobold HTTP Server on port {args.port}")
     epurl = ""
     if args.host=="":
-        epurl = f"http://localhost:{args.port}" + ("?streaming=1" if not args.nostream else "")
+        epurl = f"http://localhost:{args.port}" + ("?streaming=1" if args.stream else "")
     else:
-        epurl = f"http://{args.host}:{args.port}?host={args.host}" + ("&streaming=1" if not args.nostream else "")
+        epurl = f"http://{args.host}:{args.port}?host={args.host}" + ("&streaming=1" if args.stream else "")
 
 
     print(f"Please connect to custom endpoint at {epurl}")
@@ -351,7 +351,7 @@ if __name__ == '__main__':
     parser.add_argument("--host", help="Host IP to listen on. If empty, all routable interfaces are accepted.", default="")
     default_threads = (os.cpu_count() if os.cpu_count()<=6 else max(6,os.cpu_count()-2))
     parser.add_argument("--threads", help="Use a custom number of threads if specified. Otherwise, uses an amount based on CPU cores", type=int, default=default_threads)
-    parser.add_argument("--nostream", help="Disables pseudo streaming", action='store_true')
+    parser.add_argument("--stream", help="Uses pseudo streaming", action='store_true')
    parser.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true')
     args = parser.parse_args()
     main(args)