updated lite, fixed some encoding issues

commit 24127ebf98
parent 75e4548821

3 changed files with 28 additions and 16 deletions
klite.embd (14 changes)

File diff suppressed because one or more lines are too long
koboldcpp.py (14 changes)
@@ -191,6 +191,14 @@ def generate(prompt,max_length=20, max_context_length=512,temperature=0.8,top_k=
         return ret.text.decode("UTF-8","ignore")
     return ""
 
+def utfprint(str):
+    try:
+        print(str)
+    except UnicodeEncodeError:
+        # Replace or omit the problematic character
+        utf_string = str.encode('ascii', 'ignore').decode('ascii')
+        print(utf_string)
+
 #################################################################
 ### A hacky simple HTTP server simulating a kobold api by Concedo
 ### we are intentionally NOT using flask, because we want MINIMAL dependencies
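For context on the failure mode this guards against: print() raises UnicodeEncodeError when the stdout encoding (for instance cp1252 on some Windows consoles) cannot represent a character, and the new utfprint() falls back to an ASCII-only rendering via encode('ascii', 'ignore'). One small caveat: the parameter named str shadows the built-in of the same name inside the function, which is harmless here but worth noting. A minimal sketch reproducing the behavior; the reconfigure() call is illustrative setup to simulate a legacy console, not part of the commit:

import sys

# Force an ASCII-only stdout to mimic a legacy Windows console (Python 3.7+).
sys.stdout.reconfigure(encoding='ascii')

text = "caf\u00e9 \u2615"   # "café ☕", written as escapes so this snippet stays ASCII-clean
try:
    print(text)             # raises UnicodeEncodeError on the restricted stream
except UnicodeEncodeError:
    # Same fallback as utfprint(): drop unencodable characters, prints "caf "
    print(text.encode('ascii', 'ignore').decode('ascii'))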
@@ -301,7 +309,7 @@ class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
             self.send_response(503)
             self.end_headers()
             return
-        print("\nInput: " + json.dumps(genparams))
+        utfprint("\nInput: " + json.dumps(genparams))
 
         modelbusy = True
         if kai_api_flag:
@@ -327,7 +335,7 @@ class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
                 seed=-1,
                 stop_sequence=genparams.get('stop_sequence', [])
                 )
-            print("\nOutput: " + recvtxt)
+            utfprint("\nOutput: " + recvtxt)
             res = {"results": [{"text": recvtxt}]}
         else:
             recvtxt = generate(
@@ -343,7 +351,7 @@ class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
                 seed=-1,
                 stop_sequence=genparams.get('stop_sequence', [])
                 )
-            print("\nOutput: " + recvtxt)
+            utfprint("\nOutput: " + recvtxt)
             res = {"data": {"seqs":[recvtxt]}}
 
         try:
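Of the three call sites switched to utfprint(), the Input: line is largely precautionary: json.dumps defaults to ensure_ascii=True, so non-ASCII characters in genparams are already escaped before printing. The Output: lines are the ones that could genuinely crash, since recvtxt is raw generated text. A hypothetical repro with invented values, runnable on its own:

import json

genparams = {"prompt": "caf\u00e9"}           # invented example payload
print("\nInput: " + json.dumps(genparams))    # safe: ensure_ascii=True escapes to caf\u00e9

recvtxt = "caf\u00e9 \u2615"                  # raw model output is NOT escaped
# print("\nOutput: " + recvtxt)               # could raise UnicodeEncodeError on cp1252/ascii consoles
# utfprint("\nOutput: " + recvtxt)            # the committed fix: degrades to ASCII instead

The final hunk below comes from the commit's third changed file (its name isn't preserved in this capture) and renames the ggml_v2_cuda_transform_tensor calls to ggml_cuda_transform_tensor while keeping the ggml_v2_nbytes accounting.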
@@ -1071,17 +1071,17 @@ static void llama_v2_model_load_internal(
         for (int i = 0; i < n_gpu; ++i) {
             const auto & layer = model.layers[i];
 
-            ggml_v2_cuda_transform_tensor(layer.wq); vram_total += ggml_v2_nbytes(layer.wq);
-            ggml_v2_cuda_transform_tensor(layer.wk); vram_total += ggml_v2_nbytes(layer.wk);
-            ggml_v2_cuda_transform_tensor(layer.wv); vram_total += ggml_v2_nbytes(layer.wv);
-            ggml_v2_cuda_transform_tensor(layer.wo); vram_total += ggml_v2_nbytes(layer.wo);
-            ggml_v2_cuda_transform_tensor(layer.w1); vram_total += ggml_v2_nbytes(layer.w1);
-            ggml_v2_cuda_transform_tensor(layer.w2); vram_total += ggml_v2_nbytes(layer.w2);
-            ggml_v2_cuda_transform_tensor(layer.w3); vram_total += ggml_v2_nbytes(layer.w3);
+            ggml_cuda_transform_tensor(layer.wq); vram_total += ggml_v2_nbytes(layer.wq);
+            ggml_cuda_transform_tensor(layer.wk); vram_total += ggml_v2_nbytes(layer.wk);
+            ggml_cuda_transform_tensor(layer.wv); vram_total += ggml_v2_nbytes(layer.wv);
+            ggml_cuda_transform_tensor(layer.wo); vram_total += ggml_v2_nbytes(layer.wo);
+            ggml_cuda_transform_tensor(layer.w1); vram_total += ggml_v2_nbytes(layer.w1);
+            ggml_cuda_transform_tensor(layer.w2); vram_total += ggml_v2_nbytes(layer.w2);
+            ggml_cuda_transform_tensor(layer.w3); vram_total += ggml_v2_nbytes(layer.w3);
         }
         if (n_gpu_layers > (int) hparams.n_layer) {
             fprintf(stderr, "%s: [cublas] offloading output layer to GPU\n", __func__);
-            ggml_v2_cuda_transform_tensor(model.output); vram_total += ggml_v2_nbytes(model.output);
+            ggml_cuda_transform_tensor(model.output); vram_total += ggml_v2_nbytes(model.output);
         }
 
         fprintf(stderr, "%s: [cublas] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
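The VRAM bookkeeping around those calls is untouched by the rename: each offloaded tensor's byte size is accumulated into vram_total, and the summary line converts bytes to MB by dividing by 1024 twice. A rough sketch of that arithmetic, using a hypothetical helper with made-up sizes that mirrors (rather than calls) the ggml code:

def plan_vram_mb(n_gpu_layers, n_layer, layer_bytes, output_bytes):
    n_gpu = min(n_gpu_layers, n_layer)   # layers actually offloaded in the loop
    vram_total = n_gpu * layer_bytes     # wq,wk,wv,wo,w1,w2,w3 summed per layer
    if n_gpu_layers > n_layer:           # same condition as the output-layer branch
        vram_total += output_bytes
    return vram_total / 1024 / 1024     # reported as "total VRAM used: %zu MB"

# e.g. 32 offloaded layers of ~400 MB plus a ~260 MB output tensor -> ~13060.0 MB
print(plan_vram_mb(33, 32, 400 * 1024 * 1024, 260 * 1024 * 1024))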