Merge remote-tracking branch 'origin/master' into concedo_experimental
# Conflicts:
#	README.md

commit d0d3c4f32b

3 changed files with 21 additions and 3 deletions
examples/metal/metal.cpp

@@ -40,8 +40,10 @@ int main(int argc, char ** argv) {
     // this allocates all Metal resources and memory buffers
     auto * ctx_metal = ggml_metal_init();
 
-    ggml_metal_add_buffer(ctx_metal, "data", ggml_get_mem_buffer(ctx_data), ggml_get_mem_size(ctx_data));
-    ggml_metal_add_buffer(ctx_metal, "eval", ggml_get_mem_buffer(ctx_eval), ggml_get_mem_size(ctx_eval));
+    const size_t max_size_data = ggml_get_max_tensor_size(ctx_data);
+    const size_t max_size_eval = ggml_get_max_tensor_size(ctx_eval);
+    ggml_metal_add_buffer(ctx_metal, "data", ggml_get_mem_buffer(ctx_data), ggml_get_mem_size(ctx_data), max_size_data);
+    ggml_metal_add_buffer(ctx_metal, "eval", ggml_get_mem_buffer(ctx_eval), ggml_get_mem_size(ctx_eval), max_size_eval);
 
     // main
     {
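The extra argument passed to ggml_metal_add_buffer here is the size of the largest tensor in the corresponding context, as returned by ggml_get_max_tensor_size. A plausible motivation, sketched below with invented numbers: a weight buffer can exceed the device's maximum Metal buffer length, in which case the backend must map it as several views, and overlapping consecutive views by the largest tensor size guarantees every tensor lands wholly inside one view. Everything in this sketch is illustrative; it is not ggml's implementation.

// Sketch: why the Metal backend wants the largest tensor size.
// All names and sizes here are made up for illustration.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
    // byte sizes of the tensors living in one ggml context (hypothetical)
    std::vector<size_t> tensor_sizes = {512u << 20, 128u << 20, 64u << 20};
    const size_t max_size = *std::max_element(tensor_sizes.begin(), tensor_sizes.end());

    const size_t buf_size   = 6ull << 30; // whole weight buffer: 6 GiB (hypothetical)
    const size_t device_max = 2ull << 30; // device's max single Metal buffer: 2 GiB

    if (buf_size <= device_max) {
        printf("one Metal buffer is enough\n");
        return 0;
    }

    // Split the host buffer into overlapping views. Stepping by
    // device_max - max_size overlaps each view with the next by the largest
    // tensor size, so every tensor fits completely inside some view.
    const size_t step = device_max - max_size;
    size_t n_views = 0;
    for (size_t ofs = 0; ofs < buf_size; ofs += step) {
        n_views++;
    }
    printf("need %zu overlapping views of at most %zu bytes each\n", n_views, device_max);
    return 0;
}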
koboldcpp.py

@@ -225,7 +225,7 @@ maxhordectx = 1024
 maxhordelen = 256
 modelbusy = False
 defaultport = 5001
-KcppVersion = "1.31.2"
+KcppVersion = "1.32"
 showdebug = True
 
 class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
llama.cpp (16 additions and 0 deletions)
@@ -19,6 +19,11 @@
 #ifdef GGML_USE_METAL
 #include "ggml-metal.h"
 #endif
+#ifdef GGML_USE_K_QUANTS
+#ifndef QK_K
+#define QK_K 256
+#endif
+#endif
 
 #include <array>
 #include <ctime>
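For context: QK_K is the k-quant super-block size. k-quants store weights in blocks of 256 values with a per-super-block scale plus smaller per-sub-block scales, which is where the divisibility requirement enforced later in this commit comes from. The sketch below illustrates the storage arithmetic with a struct modeled on the Q6_K layout; the field sizes follow the k-quants design but should be checked against k_quants.h rather than read as authoritative.

// Sketch: what QK_K = 256 buys. Struct modeled on ggml's Q6_K super-block;
// not the canonical definition.
#include <cstdint>
#include <cstdio>

#define QK_K 256

struct block_q6_K_sketch {
    uint8_t  ql[QK_K / 2];      // lower 4 bits of each 6-bit quant
    uint8_t  qh[QK_K / 4];      // upper 2 bits of each 6-bit quant
    int8_t   scales[QK_K / 16]; // one 8-bit scale per 16-weight sub-block
    uint16_t d;                 // fp16 super-block scale
};

int main() {
    printf("bytes per super-block: %zu\n", sizeof(block_q6_K_sketch));          // 210
    printf("bits per weight: %.4f\n", 8.0 * sizeof(block_q6_K_sketch) / QK_K);  // 6.5625
    return 0;
}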
@@ -2491,6 +2496,17 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         } else {
             new_type = quantized_type;
 #ifdef GGML_USE_K_QUANTS
+            if (quantized_type == GGML_TYPE_Q2_K || quantized_type == GGML_TYPE_Q3_K || quantized_type == GGML_TYPE_Q4_K ||
+                quantized_type == GGML_TYPE_Q5_K || quantized_type == GGML_TYPE_Q6_K) {
+                int nx = tensor.ne.at(0);
+                int ny = tensor.ne.at(1);
+                if (nx % QK_K != 0 || ny % QK_K != 0) {
+                    fprintf(stderr, "\n\n========================= Tensor sizes %d x %d are not divisible by %d\n", nx, ny, QK_K);
+                    fprintf(stderr, "This is required to be able to use k-quants for now!\n");
+                    fprintf(stderr, "========================================================================================\n\n");
+                    throw std::runtime_error("Unsupported tensor size encountered\n");
+                }
+            }
             if (tensor.name == "output.weight") {
                 new_type = GGML_TYPE_Q6_K;
             } else if (tensor.name.find("attention.wv.weight") != std::string::npos) {
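The guard added above rejects any 2-D weight whose dimensions are not both multiples of QK_K before quantization begins. A minimal standalone sketch of the same rule; the helper name and the example tensor shapes are hypothetical, not llama.cpp API:

// Hypothetical helper mirroring the shape check above; illustrative only.
#include <cstdint>
#include <cstdio>
#include <stdexcept>
#include <string>

constexpr int64_t kSuperBlock = 256; // same value as QK_K

// The check above requires both dimensions of a 2-D weight matrix to be
// multiples of the super-block size; throw, like the quantizer, otherwise.
static void check_k_quant_shape(const std::string & name, int64_t nx, int64_t ny) {
    if (nx % kSuperBlock != 0 || ny % kSuperBlock != 0) {
        throw std::runtime_error(
            "tensor '" + name + "' is " + std::to_string(nx) + " x " + std::to_string(ny) +
            ", not divisible by " + std::to_string(kSuperBlock));
    }
}

int main() {
    try {
        check_k_quant_shape("layers.0.attention.wq.weight", 4096, 4096); // 4096 % 256 == 0: passes
        check_k_quant_shape("tok_embeddings.weight", 3200, 32000);       // 3200 % 256 == 128: throws
    } catch (const std::exception & e) {
        fprintf(stderr, "%s\n", e.what());
    }
    return 0;
}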