Merged upstream updates to the model loading code and removed the legacy llama loaders, which were no longer needed.
commit f53238f570

20 changed files with 1234 additions and 1446 deletions
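The main functional addition is an optional mmap-based load path: the loader structs gain a use_mmap field, koboldcpp.py gains a --usemmap flag, and the value is forwarded into llama_context_params.use_mmap on the C++ side. The sketch below illustrates how the flag travels through the Python ctypes layer; the field names come from the diff in this commit, while the abridged struct and the build_inputs() helper are illustrative only, not part of the repository.

```python
import ctypes

# Illustrative, abridged mirror of the C struct in expose.h; the real
# load_model_inputs in koboldcpp.py declares more fields than shown here.
class load_model_inputs(ctypes.Structure):
    _fields_ = [("batch_size", ctypes.c_int),
                ("f16_kv", ctypes.c_bool),
                ("model_filename", ctypes.c_char_p),
                ("n_parts_overwrite", ctypes.c_int),
                ("use_mmap", ctypes.c_bool)]  # new in this commit

def build_inputs(model_path, use_mmap=False):
    # Hypothetical helper: shows where the new flag is set before the
    # struct is handed to handle.load_model() in the shared library.
    inputs = load_model_inputs()
    inputs.model_filename = model_path.encode("UTF-8")
    inputs.batch_size = 8
    inputs.f16_kv = True
    inputs.n_parts_overwrite = -1
    inputs.use_mmap = use_mmap  # forwarded to llama_context_params.use_mmap
    return inputs
```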
Makefile (6 changes)

@@ -37,7 +37,7 @@ LDFLAGS =
#lets try enabling everything
CFLAGS += -pthread -s
CXXFLAGS += -pthread -s
CXXFLAGS += -pthread -s -Wno-multichar

# OS specific
# TODO: support Windows

@@ -121,7 +121,7 @@ BLAS_BUILD =
ifeq ($(OS),Windows_NT)
BLAS_BUILD = $(CXX) $(CXXFLAGS) ggml_blas.o ggml_v1.o expose.o common.o llama_adapter.o gpttype_adapter.o libopenblas.lib -shared -o koboldcpp_blas.dll $(LDFLAGS)
else
BLAS_BUILD = @echo 'Your OS $(OS) does not appear to be Windows. If you want to use openblas, please install it seperately, then link it manually with LLAMA_OPENBLAS=1'
BLAS_BUILD = @echo 'Your OS $(OS) does not appear to be Windows. If you want to use openblas, please install it seperately, then link it manually with LLAMA_OPENBLAS=1. This is just a reminder, not an error.'
endif

#

@@ -154,7 +154,7 @@ ggml_blas.o: ggml.c ggml.h
ggml_v1.o: otherarch/ggml_v1.c otherarch/ggml_v1.h
$(CC) $(CFLAGS) -c otherarch/ggml_v1.c -o ggml_v1.o

llama.o: llama.cpp llama.h
llama.o: llama.cpp llama.h llama_internal.h
$(CXX) $(CXXFLAGS) -c llama.cpp -o llama.o

common.o: examples/common.cpp examples/common.h

(next changed file; name not shown)

@@ -1,7 +1,5 @@
#include "common.h"

#include "ggml.h"

#include <cassert>
#include <cstring>
#include <fstream>

@@ -161,6 +159,8 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
params.use_color = true;
} else if (arg == "--mlock") {
params.use_mlock = true;
} else if (arg == "--no-mmap") {
params.use_mmap = false;
} else if (arg == "--mtest") {
params.mem_test = true;
} else if (arg == "--verbose-prompt") {

@@ -240,9 +240,12 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
fprintf(stderr, " -b N, --batch_size N batch size for prompt processing (default: %d)\n", params.n_batch);
fprintf(stderr, " --perplexity compute perplexity over the prompt\n");
fprintf(stderr, " --keep number of tokens to keep from the initial prompt (default: %d, -1 = all)\n", params.n_keep);
if (ggml_mlock_supported()) {
if (llama_mlock_supported()) {
fprintf(stderr, " --mlock force system to keep model in RAM rather than swapping or compressing\n");
}
if (llama_mmap_supported()) {
fprintf(stderr, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
}
fprintf(stderr, " --mtest compute maximum memory usage\n");
fprintf(stderr, " --verbose-prompt print prompt before generation\n");
fprintf(stderr, " -m FNAME, --model FNAME\n");

(next changed file; name not shown)

@@ -47,6 +47,7 @@ struct gpt_params {
bool instruct = false; // instruction mode (used for Alpaca models)
bool ignore_eos = false; // do not stop generating after eos
bool perplexity = false; // compute perplexity over the prompt
bool use_mmap = true; // use mmap for faster loads
bool use_mlock = false; // use mlock to keep model in memory
bool mem_test = false; // compute maximum memory usage
bool verbose_prompt = false; // print prompt tokens before generation

(next changed file; name not shown)

@@ -38,6 +38,7 @@ int main(int argc, char ** argv) {
lparams.seed = params.seed;
lparams.f16_kv = params.memory_f16;
lparams.logits_all = params.perplexity;
lparams.use_mmap = params.use_mmap;
lparams.use_mlock = params.use_mlock;
lparams.embedding = params.embedding;

(next changed file; name not shown)

@@ -97,6 +97,7 @@ int main(int argc, char ** argv) {
lparams.n_parts = params.n_parts;
lparams.seed = params.seed;
lparams.f16_kv = params.memory_f16;
lparams.use_mmap = params.use_mmap;
lparams.use_mlock = params.use_mlock;

ctx = llama_init_from_file(params.model.c_str(), lparams);

(next changed file; name not shown)

@@ -115,6 +115,7 @@ int main(int argc, char ** argv) {
lparams.seed = params.seed;
lparams.f16_kv = params.memory_f16;
lparams.logits_all = params.perplexity;
lparams.use_mmap = params.use_mmap;
lparams.use_mlock = params.use_mlock;
lparams.embedding = params.embedding;

(next changed file; name not shown)

@@ -1,5 +1,6 @@
#include "ggml.h"
#include "llama.h"
#include "llama_internal.h"

#include <algorithm>
#include <cassert>

@@ -266,15 +267,13 @@ int main(int argc, char ** argv) {
}
}

// Sort tensors for consistent output
const auto tensors = llama_internal_get_tensor_map(ctx);
std::map<std::string, struct ggml_tensor *> tensors_sorted { tensors.begin(), tensors.end() };
const auto &tensors = llama_internal_get_tensor_map(ctx);

// check layer tensors
int included_layers = 0;
int64_t max_nelements = 0;
bool is_f16 = false;
for (const auto& kv_tensor : tensors_sorted) {
for (const auto& kv_tensor : tensors) {
if (!layer_included(params, kv_tensor.first)) {
continue;
}

@@ -315,7 +314,7 @@ int main(int argc, char ** argv) {
error_stats global_stats {};

for (const auto& kv_tensor : tensors_sorted) {
for (const auto& kv_tensor : tensors) {
if (!layer_included(params, kv_tensor.first)) {
continue;
}

expose.h (1 change)

@@ -8,6 +8,7 @@ struct load_model_inputs
const bool f16_kv;
const char *model_filename;
const int n_parts_overwrite = -1;
const bool use_mmap;
};
struct generation_inputs
{

ggml.c (78 changes)

@@ -97,17 +97,6 @@ typedef void* thread_ret_t;
#define static_assert(cond, msg) _Static_assert(cond, msg)
#endif

#define GGML_MLOCK_SUPPORT 0

#ifdef __has_include
#if __has_include(<sys/mman.h>)
#undef GGML_MLOCK_SUPPORT
#define GGML_MLOCK_SUPPORT 1
#include <sys/mman.h>
#endif
#endif


/*#define GGML_PERF*/
#define GGML_DEBUG 0
#define GGML_GELU_FP16

@@ -2690,21 +2679,6 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
static_assert(GGML_OP_COUNT == 35, "GGML_OP_COUNT != 35");

//
// ggml object
//

struct ggml_object {
size_t offs;
size_t size;

struct ggml_object * next;

char padding[8];
};

static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
static_assert(sizeof(struct ggml_tensor)%GGML_MEM_ALIGN == 0, "ggml_tensor size must be a multiple of GGML_MEM_ALIGN");

@@ -2716,7 +2690,6 @@ struct ggml_context {
size_t mem_size;
void * mem_buffer;
bool mem_buffer_owned;
bool mem_buffer_mlocked;
bool no_alloc;

int n_objects;

@@ -3003,7 +2976,6 @@ struct ggml_context * ggml_init(struct ggml_init_params params) {
/*.mem_size =*/ params.mem_size,
/*.mem_buffer =*/ params.mem_buffer ? params.mem_buffer : malloc(params.mem_size),
/*.mem_buffer_owned =*/ params.mem_buffer ? false : true,
/*.mem_buffer_mlocked =*/ false,
/*.no_alloc =*/ params.no_alloc,
/*.n_objects =*/ 0,
/*.objects_begin =*/ NULL,

@@ -3036,14 +3008,6 @@ void ggml_free(struct ggml_context * ctx) {
GGML_PRINT_DEBUG("%s: context %d with %d objects has been freed. memory used = %zu\n",
__func__, i, ctx->n_objects, ctx->objects_end->offs + ctx->objects_end->size);

#if GGML_MLOCK_SUPPORT
if (ctx->mem_buffer_mlocked) {
if (munlock(ctx->mem_buffer, ctx->mem_size)) {
fprintf(stderr, "%s: failed to munlock buffer: %s\n", __func__, strerror(errno));
}
}
#endif

if (ctx->mem_buffer_owned) {
free(ctx->mem_buffer);
}

@@ -3072,48 +3036,6 @@ size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch)
return result;
}

#ifdef __APPLE__
#define MLOCK_SUGGESTION \
"Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
"decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
#else
#define MLOCK_SUGGESTION \
"Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
#endif

bool ggml_mlock_supported(void) {
return GGML_MLOCK_SUPPORT;
}

bool ggml_mlock(
struct ggml_context * ctx,
const void *opt_extra_addr,
size_t opt_extra_len,
char **err_p) {
// TODO: Use SetProcessWorkingSetSize() + VirtualLock() on WIN32
#if GGML_MLOCK_SUPPORT
if (ctx->mem_buffer_mlocked) {
return true;
}
if (mlock(ctx->mem_buffer, ctx->mem_size) ||
(opt_extra_len &&
mlock(opt_extra_addr, opt_extra_len))) {
if ((*err_p = malloc(1024))) {
snprintf(*err_p, 1024,
"failed to mlock %zu-byte buffer: %s\n" MLOCK_SUGGESTION,
ctx->mem_size + opt_extra_len,
strerror(errno));
}
return false;
}
ctx->mem_buffer_mlocked = true;
return true;
#else // GGML_MLOCK_SUPPORT
*err_p = strdup("can't mlock because it's not supported on this system");
return false;
#endif // GGML_MLOCK_SUPPORT
}

////////////////////////////////////////////////////////////////////////////////

struct ggml_tensor * ggml_new_tensor_impl(

ggml.h (20 changes)

@@ -253,6 +253,19 @@ enum ggml_op {
GGML_OP_COUNT,
};


// ggml object
struct ggml_object {
size_t offs;
size_t size;

struct ggml_object * next;

char padding[8];
};

static const size_t GGML_OBJECT_SIZE = sizeof(struct ggml_object);

// n-dimensional tensor
struct ggml_tensor {
enum ggml_type type;

@@ -344,13 +357,6 @@ size_t ggml_used_mem(const struct ggml_context * ctx);

size_t ggml_set_scratch(struct ggml_context * ctx, struct ggml_scratch scratch);

bool ggml_mlock_supported(void);
bool ggml_mlock(
struct ggml_context * ctx,
const void *opt_extra_addr,
size_t opt_extra_len,
char **err_p);

struct ggml_tensor * ggml_new_tensor(
struct ggml_context * ctx,
enum ggml_type type,

koboldcpp.dll (new binary file; content not shown)

koboldcpp.py (11 changes)

@@ -14,7 +14,8 @@ class load_model_inputs(ctypes.Structure):
("batch_size", ctypes.c_int),
("f16_kv", ctypes.c_bool),
("model_filename", ctypes.c_char_p),
("n_parts_overwrite", ctypes.c_int)]
("n_parts_overwrite", ctypes.c_int),
("use_mmap", ctypes.c_bool)]

class generation_inputs(ctypes.Structure):
_fields_ = [("seed", ctypes.c_int),

@@ -53,7 +54,7 @@ def init_library():
handle.generate.argtypes = [generation_inputs, ctypes.c_wchar_p] #apparently needed for osx to work. i duno why they need to interpret it that way but whatever
handle.generate.restype = generation_outputs

def load_model(model_filename,batch_size=8,max_context_length=512,n_parts_overwrite=-1,threads=6):
def load_model(model_filename,batch_size=8,max_context_length=512,n_parts_overwrite=-1,threads=6,use_mmap=False):
inputs = load_model_inputs()
inputs.model_filename = model_filename.encode("UTF-8")
inputs.batch_size = batch_size

@@ -61,6 +62,7 @@ def load_model(model_filename,batch_size=8,max_context_length=512,n_parts_overwr
inputs.threads = threads
inputs.n_parts_overwrite = n_parts_overwrite
inputs.f16_kv = True
inputs.use_mmap = use_mmap
ret = handle.load_model(inputs)
return ret

@@ -347,7 +349,7 @@ def main(args):
mdl_nparts = sum(1 for n in range(1, 9) if os.path.exists(f"{ggml_selected_file}.{n}")) + 1
modelname = os.path.abspath(ggml_selected_file)
print(f"Loading model: {modelname} \n[Parts: {mdl_nparts}, Threads: {args.threads}]")
loadok = load_model(modelname,8,maxctx,mdl_nparts,args.threads)
loadok = load_model(modelname,8,maxctx,mdl_nparts,args.threads,args.usemmap)
print("Load Model OK: " + str(loadok))

if not loadok:

@@ -369,7 +371,7 @@ def main(args):
if args.host=="":
epurl = f"http://localhost:{args.port}" + ("?streaming=1" if args.stream else "")
else:
epurl = f"http://{args.host}:{args.port}" + ("&streaming=1" if args.stream else "")
epurl = f"http://{args.host}:{args.port}" + ("?streaming=1" if args.stream else "")


print(f"Please connect to custom endpoint at {epurl}")

@@ -394,5 +396,6 @@ if __name__ == '__main__':
parser.add_argument("--psutil_set_threads", help="Experimental flag. If set, uses psutils to determine thread count based on physical cores.", action='store_true')
parser.add_argument("--stream", help="Uses pseudo streaming", action='store_true')
parser.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true')
parser.add_argument("--usemmap", help="Use mmap to load newer models (default false)", action='store_true')
args = parser.parse_args()
main(args)

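With the Python changes above, the mmap behaviour is opt-in through the extra use_mmap argument. A minimal hypothetical call using the load_model() signature from this diff is sketched below; the module import, model path, and thread count are placeholders, not something this commit guarantees.

```python
# Hypothetical usage of the updated load_model() signature from this diff.
# The import name and the model path are placeholders, not repo guarantees.
from koboldcpp import init_library, load_model

init_library()  # resolve the shared library handle before loading
ok = load_model("models/ggml-model.bin",  # placeholder path
                batch_size=8,
                max_context_length=512,
                n_parts_overwrite=-1,
                threads=6,
                use_mmap=True)  # new keyword; defaults to False
print("Load Model OK:", ok)
```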
koboldcpp_blas.dll (new binary file; content not shown)

llama.h (13 changes)

@@ -55,6 +55,7 @@ extern "C" {
bool f16_kv; // use fp16 for KV cache
bool logits_all; // the llama_eval() call computes all logits, not just the last one
bool vocab_only; // only load the vocabulary, no weights
bool use_mmap; // use mmap if possible
bool use_mlock; // force system to keep model in RAM
bool embedding; // embedding mode only

@@ -66,6 +67,9 @@ extern "C" {

LLAMA_API struct llama_context_params llama_context_default_params();

LLAMA_API bool llama_mmap_supported();
LLAMA_API bool llama_mlock_supported();

// Various functions for loading a ggml llama model.
// Allocate (almost) all memory needed for the model.
// Return NULL on failure

@@ -164,13 +168,6 @@ extern "C" {

#ifdef __cplusplus
}

#include <string>
#include <unordered_map>
//
// Internal function exposed for tests and benchmarks
//
std::unordered_map<std::string, struct ggml_tensor *>& llama_internal_get_tensor_map(struct llama_context * ctx);
#endif

#endif
#endif // LLAMA_H

(next changed file; name not shown)

@@ -42,17 +42,13 @@ bool llama_load_model(const load_model_inputs inputs, FileFormat in_file_format)
ctx_params.seed = -1;
ctx_params.f16_kv = inputs.f16_kv;
ctx_params.logits_all = false;
ctx_params.use_mmap = inputs.use_mmap;

file_format = in_file_format;

if (file_format == FileFormat::GGML || file_format == FileFormat::GGHF)
{
ctx = legacy_llama_init_from_file(modelname.c_str(), ctx_params);
}
else
{
ctx = llama_init_from_file(modelname.c_str(), ctx_params);
}

ctx = llama_init_from_file(modelname.c_str(), ctx_params);


if (ctx == NULL)
{

llama_internal.h (new file, 12 lines)

@@ -0,0 +1,12 @@
// Internal header to be included by llama.cpp and tests/benchmarks only.

#ifndef LLAMA_INTERNAL_H
#define LLAMA_INTERNAL_H

#include <vector>
#include <string>
struct ggml_tensor;

std::vector<std::pair<std::string, struct ggml_tensor *>>& llama_internal_get_tensor_map(struct llama_context * ctx);

#endif // LLAMA_INTERNAL_H

llama_util.h (new executable file, 383 lines)
|
@@ -0,0 +1,383 @@
|
|||
// Internal header to be included only by llama.cpp.
|
||||
// Contains wrappers around OS interfaces.
|
||||
#pragma once
|
||||
#ifndef LLAMA_UTIL_H
|
||||
#define LLAMA_UTIL_H
|
||||
|
||||
#include <cstdio>
|
||||
#include <cstdint>
|
||||
#include <cerrno>
|
||||
#include <cstring>
|
||||
#include <cstdarg>
|
||||
#include <cstdlib>
|
||||
#include <climits>
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#ifdef __has_include
|
||||
#if __has_include(<unistd.h>)
|
||||
#include <unistd.h>
|
||||
#if defined(_POSIX_MAPPED_FILES)
|
||||
#include <sys/mman.h>
|
||||
#endif
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#if defined(_WIN32)
|
||||
#define WIN32_LEAN_AND_MEAN
|
||||
#define NOMINMAX
|
||||
#include <windows.h>
|
||||
#include <io.h>
|
||||
#include <stdio.h> // for _fseeki64
|
||||
#endif
|
||||
|
||||
#define LLAMA_ASSERT(x) \
|
||||
do { \
|
||||
if (!(x)) { \
|
||||
fprintf(stderr, "LLAMA_ASSERT: %s:%d: %s\n", __FILE__, __LINE__, #x); \
|
||||
abort(); \
|
||||
} \
|
||||
} while (0)
|
||||
|
||||
#ifdef __GNUC__
|
||||
__attribute__((format(printf, 1, 2)))
|
||||
#endif
|
||||
static std::string format(const char * fmt, ...) {
|
||||
va_list ap, ap2;
|
||||
va_start(ap, fmt);
|
||||
va_copy(ap2, ap);
|
||||
int size = vsnprintf(NULL, 0, fmt, ap);
|
||||
LLAMA_ASSERT(size >= 0 && size < INT_MAX);
|
||||
std::vector<char> buf(size + 1);
|
||||
int size2 = vsnprintf(buf.data(), size + 1, fmt, ap2);
|
||||
LLAMA_ASSERT(size2 == size);
|
||||
va_end(ap2);
|
||||
va_end(ap);
|
||||
return std::string(buf.data(), size);
|
||||
};
|
||||
|
||||
struct llama_file {
|
||||
// use FILE * so we don't have to re-open the file to mmap
|
||||
FILE * fp;
|
||||
size_t size;
|
||||
|
||||
llama_file(const char * fname, const char * mode) {
|
||||
fp = std::fopen(fname, mode);
|
||||
if (fp == NULL) {
|
||||
throw format("failed to open %s: %s", fname, std::strerror(errno));
|
||||
}
|
||||
seek(0, SEEK_END);
|
||||
size = tell();
|
||||
seek(0, SEEK_SET);
|
||||
}
|
||||
|
||||
size_t tell() const {
|
||||
#ifdef _WIN32
|
||||
__int64 ret = _ftelli64(fp);
|
||||
#else
|
||||
long ret = std::ftell(fp);
|
||||
#endif
|
||||
LLAMA_ASSERT(ret != -1); // this really shouldn't fail
|
||||
return (size_t) ret;
|
||||
}
|
||||
|
||||
void seek(size_t offset, int whence) {
|
||||
#ifdef _WIN32
|
||||
int ret = _fseeki64(fp, (__int64) offset, whence);
|
||||
#else
|
||||
int ret = std::fseek(fp, (long) offset, whence);
|
||||
#endif
|
||||
LLAMA_ASSERT(ret == 0); // same
|
||||
}
|
||||
|
||||
void read_raw(void * ptr, size_t size) {
|
||||
if (size == 0) {
|
||||
return;
|
||||
}
|
||||
errno = 0;
|
||||
std::size_t ret = std::fread(ptr, size, 1, fp);
|
||||
if (ferror(fp)) {
|
||||
throw format("read error: %s", strerror(errno));
|
||||
}
|
||||
if (ret != 1) {
|
||||
throw std::string("unexpectedly reached end of file");
|
||||
}
|
||||
}
|
||||
|
||||
std::uint32_t read_u32() {
|
||||
std::uint32_t ret;
|
||||
read_raw(&ret, sizeof(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
std::string read_string(std::uint32_t len) {
|
||||
std::vector<char> chars(len);
|
||||
read_raw(chars.data(), len);
|
||||
return std::string(chars.data(), len);
|
||||
}
|
||||
|
||||
void write_raw(const void * ptr, size_t size) {
|
||||
if (size == 0) {
|
||||
return;
|
||||
}
|
||||
errno = 0;
|
||||
size_t ret = std::fwrite(ptr, size, 1, fp);
|
||||
if (ret != 1) {
|
||||
throw format("write error: %s", strerror(errno));
|
||||
}
|
||||
}
|
||||
|
||||
void write_u32(std::uint32_t val) {
|
||||
write_raw(&val, sizeof(val));
|
||||
}
|
||||
|
||||
~llama_file() {
|
||||
if (fp) {
|
||||
std::fclose(fp);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
#if defined(_WIN32)
|
||||
static std::string llama_format_win_err(DWORD err) {
|
||||
LPSTR buf;
|
||||
size_t size = FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||
NULL, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), (LPSTR)&buf, 0, NULL);
|
||||
if (!size) {
|
||||
return "FormatMessageA failed";
|
||||
}
|
||||
std::string ret(buf, size);
|
||||
LocalFree(buf);
|
||||
return ret;
|
||||
}
|
||||
#endif
|
||||
|
||||
struct llama_mmap {
|
||||
void * addr;
|
||||
size_t size;
|
||||
|
||||
llama_mmap(const llama_mmap &) = delete;
|
||||
|
||||
#ifdef _POSIX_MAPPED_FILES
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
llama_mmap(struct llama_file * file) {
|
||||
size = file->size;
|
||||
int fd = fileno(file->fp);
|
||||
int flags = MAP_SHARED;
|
||||
#ifdef __linux__
|
||||
flags |= MAP_POPULATE;
|
||||
#endif
|
||||
addr = mmap(NULL, file->size, PROT_READ, flags, fd, 0);
|
||||
close(fd);
|
||||
if (addr == MAP_FAILED) {
|
||||
throw format("mmap failed: %s", strerror(errno));
|
||||
}
|
||||
|
||||
// Advise the kernel to preload the mapped memory
|
||||
if (madvise(addr, file->size, MADV_WILLNEED)) {
|
||||
fprintf(stderr, "warning: madvise(.., MADV_WILLNEED) failed: %s\n",
|
||||
strerror(errno));
|
||||
}
|
||||
}
|
||||
|
||||
~llama_mmap() {
|
||||
munmap(addr, size);
|
||||
}
|
||||
#elif defined(_WIN32)
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
llama_mmap(struct llama_file * file) {
|
||||
size = file->size;
|
||||
|
||||
HANDLE hFile = (HANDLE) _get_osfhandle(_fileno(file->fp));
|
||||
|
||||
HANDLE hMapping = CreateFileMappingA(hFile, NULL, PAGE_READONLY, 0, 0, NULL);
|
||||
DWORD error = GetLastError();
|
||||
CloseHandle(hFile);
|
||||
|
||||
if (hMapping == NULL) {
|
||||
throw format("CreateFileMappingA failed: %s", llama_format_win_err(error).c_str());
|
||||
}
|
||||
|
||||
addr = MapViewOfFile(hMapping, FILE_MAP_READ, 0, 0, 0);
|
||||
error = GetLastError();
|
||||
CloseHandle(hMapping);
|
||||
|
||||
if (addr == NULL) {
|
||||
throw format("MapViewOfFile failed: %s", llama_format_win_err(error).c_str());
|
||||
}
|
||||
|
||||
// Advise the kernel to preload the mapped memory
|
||||
WIN32_MEMORY_RANGE_ENTRY range;
|
||||
range.VirtualAddress = addr;
|
||||
range.NumberOfBytes = (SIZE_T)size;
|
||||
if (!PrefetchVirtualMemory(GetCurrentProcess(), 1, &range, 0)) {
|
||||
fprintf(stderr, "warning: PrefetchVirtualMemory failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
|
||||
~llama_mmap() {
|
||||
if (!UnmapViewOfFile(addr)) {
|
||||
fprintf(stderr, "warning: UnmapViewOfFile failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
#else
|
||||
static constexpr bool SUPPORTED = false;
|
||||
|
||||
llama_mmap(struct llama_file *) {
|
||||
throw std::string("mmap not supported");
|
||||
}
|
||||
#endif
|
||||
};
|
||||
|
||||
// Represents some region of memory being locked using mlock or VirtualLock;
|
||||
// will automatically unlock on destruction.
|
||||
struct llama_mlock {
|
||||
void * addr = NULL;
|
||||
size_t size = 0;
|
||||
bool failed_already = false;
|
||||
|
||||
llama_mlock() {}
|
||||
llama_mlock(const llama_mlock &) = delete;
|
||||
|
||||
~llama_mlock() {
|
||||
if (size) {
|
||||
raw_unlock(addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
void init(void * addr) {
|
||||
LLAMA_ASSERT(this->addr == NULL && this->size == 0);
|
||||
this->addr = addr;
|
||||
}
|
||||
|
||||
void grow_to(size_t target_size) {
|
||||
LLAMA_ASSERT(addr);
|
||||
if (failed_already) {
|
||||
return;
|
||||
}
|
||||
size_t granularity = lock_granularity();
|
||||
target_size = (target_size + granularity - 1) & ~(granularity - 1);
|
||||
if (target_size > size) {
|
||||
if (raw_lock((uint8_t *) addr + size, target_size - size)) {
|
||||
size = target_size;
|
||||
} else {
|
||||
failed_already = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef _POSIX_MEMLOCK_RANGE
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
size_t lock_granularity() {
|
||||
return (size_t) sysconf(_SC_PAGESIZE);
|
||||
}
|
||||
|
||||
#ifdef __APPLE__
|
||||
#define MLOCK_SUGGESTION \
|
||||
"Try increasing the sysctl values 'vm.user_wire_limit' and 'vm.global_user_wire_limit' and/or " \
|
||||
"decreasing 'vm.global_no_user_wire_amount'. Also try increasing RLIMIT_MLOCK (ulimit -l).\n"
|
||||
#else
|
||||
#define MLOCK_SUGGESTION \
|
||||
"Try increasing RLIMIT_MLOCK ('ulimit -l' as root).\n"
|
||||
#endif
|
||||
|
||||
bool raw_lock(const void * addr, size_t size) {
|
||||
if (!mlock(addr, size)) {
|
||||
return true;
|
||||
} else {
|
||||
fprintf(stderr, "warning: failed to mlock %zu-byte buffer (after previously locking %zu bytes): %s\n" MLOCK_SUGGESTION,
|
||||
size, this->size, std::strerror(errno));
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
#undef MLOCK_SUGGESTION
|
||||
|
||||
void raw_unlock(void * addr, size_t size) {
|
||||
if (munlock(addr, size)) {
|
||||
fprintf(stderr, "warning: failed to munlock buffer: %s\n", std::strerror(errno));
|
||||
}
|
||||
}
|
||||
#elif defined(_WIN32)
|
||||
static constexpr bool SUPPORTED = true;
|
||||
|
||||
size_t lock_granularity() {
|
||||
SYSTEM_INFO si;
|
||||
GetSystemInfo(&si);
|
||||
return (size_t) si.dwPageSize;
|
||||
}
|
||||
|
||||
bool raw_lock(void * addr, size_t size) {
|
||||
for (int tries = 1; ; tries++) {
|
||||
if (VirtualLock(addr, size)) {
|
||||
return true;
|
||||
}
|
||||
if (tries == 2) {
|
||||
fprintf(stderr, "warning: failed to VirtualLock %zu-byte buffer (after previously locking %zu bytes): %s\n",
|
||||
size, this->size, llama_format_win_err(GetLastError()).c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
// It failed but this was only the first try; increase the working
|
||||
// set size and try again.
|
||||
SIZE_T min_ws_size, max_ws_size;
|
||||
if (!GetProcessWorkingSetSize(GetCurrentProcess(), &min_ws_size, &max_ws_size)) {
|
||||
fprintf(stderr, "warning: GetProcessWorkingSetSize failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
return false;
|
||||
}
|
||||
// Per MSDN: "The maximum number of pages that a process can lock
|
||||
// is equal to the number of pages in its minimum working set minus
|
||||
// a small overhead."
|
||||
// Hopefully a megabyte is enough overhead:
|
||||
size_t increment = size + 1048576;
|
||||
// The minimum must be <= the maximum, so we need to increase both:
|
||||
min_ws_size += size;
|
||||
max_ws_size += size;
|
||||
if (!SetProcessWorkingSetSize(GetCurrentProcess(), min_ws_size, max_ws_size)) {
|
||||
fprintf(stderr, "warning: SetProcessWorkingSetSize failed: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void raw_unlock(void * addr, size_t size) {
|
||||
if (!VirtualUnlock(addr, size)) {
|
||||
fprintf(stderr, "warning: failed to VirtualUnlock buffer: %s\n",
|
||||
llama_format_win_err(GetLastError()).c_str());
|
||||
}
|
||||
}
|
||||
#else
|
||||
static constexpr bool SUPPORTED = false;
|
||||
|
||||
void raw_lock(const void * addr, size_t size) {
|
||||
fprintf(stderr, "warning: mlock not supported on this system\n");
|
||||
}
|
||||
|
||||
void raw_unlock(const void * addr, size_t size) {}
|
||||
#endif
|
||||
};
|
||||
|
||||
// Replacement for std::vector<uint8_t> that doesn't require zero-initialization.
|
||||
struct llama_buffer {
|
||||
uint8_t * addr = NULL;
|
||||
size_t size = 0;
|
||||
|
||||
void resize(size_t size) {
|
||||
delete[] addr;
|
||||
addr = new uint8_t[size];
|
||||
this->size = size;
|
||||
}
|
||||
|
||||
~llama_buffer() {
|
||||
delete[] addr;
|
||||
}
|
||||
};
|
||||
#endif
|
llamaextra.cpp (613 changes)
|
@@ -19,619 +19,6 @@
|
|||
#endif
|
||||
|
||||
|
||||
|
||||
//freeze all the configurations for model loading for v1 and v2 formats
|
||||
struct llama_context * legacy_llama_init_from_file(const char * path_model, struct llama_context_params params)
|
||||
{
|
||||
ggml_time_init();
|
||||
|
||||
llama_context * ctx = new llama_context;
|
||||
|
||||
if (params.seed <= 0) {
|
||||
params.seed = time(NULL);
|
||||
}
|
||||
|
||||
ctx->rng = std::mt19937(params.seed);
|
||||
ctx->logits_all = params.logits_all;
|
||||
|
||||
ggml_type memory_type = params.f16_kv ? GGML_TYPE_F16 : GGML_TYPE_F32;
|
||||
|
||||
if (!legacy_llama_model_load(path_model, *ctx, params.n_ctx, params.n_parts, memory_type,
|
||||
params.vocab_only, params.progress_callback,
|
||||
params.progress_callback_user_data)) {
|
||||
fprintf(stderr, "%s: failed to load model\n", __func__);
|
||||
llama_free(ctx);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (params.use_mlock) {
|
||||
char *err;
|
||||
if (!ggml_mlock(ctx->model.ctx,
|
||||
ctx->model.mm_addr,
|
||||
ctx->model.mm_length,
|
||||
&err)) {
|
||||
fprintf(stderr, "%s\n", err);
|
||||
free(err);
|
||||
llama_free(ctx);
|
||||
return nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// reserve memory for context buffers
|
||||
{
|
||||
if (!kv_cache_init(ctx->model.hparams, ctx->model.kv_self, memory_type, ctx->model.hparams.n_ctx)) {
|
||||
fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
|
||||
llama_free(ctx);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
{
|
||||
const size_t memory_size = ggml_nbytes(ctx->model.kv_self.k) + ggml_nbytes(ctx->model.kv_self.v);
|
||||
fprintf(stderr, "%s: kv self size = %7.2f MB\n", __func__, memory_size / 1024.0 / 1024.0);
|
||||
}
|
||||
|
||||
const auto & hparams = ctx->model.hparams;
|
||||
|
||||
// resized during inference
|
||||
if (params.logits_all) {
|
||||
ctx->logits.reserve(hparams.n_ctx*hparams.n_vocab);
|
||||
} else {
|
||||
ctx->logits.reserve(hparams.n_ctx);
|
||||
}
|
||||
|
||||
if (params.embedding){
|
||||
ctx->embedding.resize(hparams.n_embd);
|
||||
}
|
||||
|
||||
ctx->buf_compute.resize(MEM_REQ_EVAL.at(ctx->model.type));
|
||||
|
||||
ctx->buf_scratch[0].resize(MEM_REQ_SCRATCH0.at(ctx->model.type));
|
||||
ctx->buf_scratch[1].resize(MEM_REQ_SCRATCH1.at(ctx->model.type));
|
||||
}
|
||||
|
||||
return ctx;
|
||||
}
|
||||
|
||||
//legacy llama model format v1 and v2 loader. there is a lot of duplicate code,
|
||||
//but it may be better to freeze it as such rather than risk tiny breaking changes
|
||||
static bool legacy_llama_model_load(
|
||||
const std::string & fname,
|
||||
llama_context & lctx,
|
||||
int n_ctx,
|
||||
int n_parts,
|
||||
ggml_type memory_type,
|
||||
bool vocab_only,
|
||||
llama_progress_callback progress_callback,
|
||||
void *progress_callback_user_data) {
|
||||
fprintf(stderr, "%s: Legacy loading model from '%s' - please wait ...\n", __func__, fname.c_str());
|
||||
|
||||
const int64_t t_start_us = ggml_time_us();
|
||||
|
||||
lctx.t_start_us = t_start_us;
|
||||
|
||||
std::vector<char> f_buf(1024*1024);
|
||||
|
||||
auto & model = lctx.model;
|
||||
auto & vocab = lctx.vocab;
|
||||
|
||||
auto fin = std::ifstream(fname, std::ios::binary);
|
||||
fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
|
||||
if (!fin) {
|
||||
fprintf(stderr, "%s: failed to open '%s'\n", __func__, fname.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
bool legacy_file_format = false;
|
||||
// verify magic
|
||||
{
|
||||
uint32_t magic;
|
||||
fin.read((char *) &magic, sizeof(magic));
|
||||
if (magic == 0x67676d6c) { // 'ggml' in hex, very first version
|
||||
fprintf(stderr, "%s: very old v1 model file '%s' (please regenerate your model files if you can!)\n",
|
||||
__func__, fname.c_str());
|
||||
legacy_file_format = true;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (magic != 0x67676d66) { // 'ggmf' in hex, second version
|
||||
fprintf(stderr, "%s: invalid legacy model file '%s' (bad magic)\n", __func__, fname.c_str());
|
||||
return false;
|
||||
}
|
||||
|
||||
uint32_t format_version;
|
||||
fin.read((char *) &format_version, sizeof(format_version));
|
||||
|
||||
uint32_t v2_format_version = 1;
|
||||
if (format_version != v2_format_version) {
|
||||
fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ", expected %d)\n",
|
||||
__func__, fname.c_str(), format_version, v2_format_version);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int n_ff = 0;
|
||||
|
||||
// load hparams
|
||||
{
|
||||
auto & hparams = model.hparams;
|
||||
|
||||
fin.read((char *) &hparams.n_vocab, sizeof(hparams.n_vocab));
|
||||
//fin.read((char *) &hparams.n_ctx, sizeof(hparams.n_ctx));
|
||||
fin.read((char *) &hparams.n_embd, sizeof(hparams.n_embd));
|
||||
fin.read((char *) &hparams.n_mult, sizeof(hparams.n_mult));
|
||||
fin.read((char *) &hparams.n_head, sizeof(hparams.n_head));
|
||||
fin.read((char *) &hparams.n_layer, sizeof(hparams.n_layer));
|
||||
fin.read((char *) &hparams.n_rot, sizeof(hparams.n_rot));
|
||||
fin.read((char *) &hparams.f16, sizeof(hparams.f16));
|
||||
|
||||
hparams.n_ctx = n_ctx;
|
||||
|
||||
n_ff = ((2*(4*hparams.n_embd)/3 + hparams.n_mult - 1)/hparams.n_mult)*hparams.n_mult;
|
||||
|
||||
if (n_parts < 1) {
|
||||
n_parts = LLAMA_N_PARTS.at(hparams.n_embd);
|
||||
}
|
||||
|
||||
// temp warning to tell the user to use "--n_parts"
|
||||
if (hparams.f16 == 4 && n_parts != 1) {
|
||||
fprintf(stderr, "%s: GPTQ model detected - are you sure n_parts should be %d? we normally expect it to be 1\n", __func__, n_parts);
|
||||
fprintf(stderr, "%s: use '--n_parts 1' if necessary\n", __func__);
|
||||
}
|
||||
|
||||
if (hparams.n_layer == 32) {
|
||||
model.type = e_model::MODEL_7B;
|
||||
}
|
||||
|
||||
if (hparams.n_layer == 40) {
|
||||
model.type = e_model::MODEL_13B;
|
||||
}
|
||||
|
||||
if (hparams.n_layer == 60) {
|
||||
model.type = e_model::MODEL_30B;
|
||||
}
|
||||
|
||||
if (hparams.n_layer == 80) {
|
||||
model.type = e_model::MODEL_65B;
|
||||
}
|
||||
|
||||
fprintf(stderr, "%s: n_vocab = %d\n", __func__, hparams.n_vocab);
|
||||
fprintf(stderr, "%s: n_ctx = %d\n", __func__, hparams.n_ctx);
|
||||
fprintf(stderr, "%s: n_embd = %d\n", __func__, hparams.n_embd);
|
||||
fprintf(stderr, "%s: n_mult = %d\n", __func__, hparams.n_mult);
|
||||
fprintf(stderr, "%s: n_head = %d\n", __func__, hparams.n_head);
|
||||
fprintf(stderr, "%s: n_layer = %d\n", __func__, hparams.n_layer);
|
||||
fprintf(stderr, "%s: n_rot = %d\n", __func__, hparams.n_rot);
|
||||
fprintf(stderr, "%s: f16 = %d\n", __func__, hparams.f16);
|
||||
fprintf(stderr, "%s: n_ff = %d\n", __func__, n_ff);
|
||||
fprintf(stderr, "%s: n_parts = %d\n", __func__, n_parts);
|
||||
fprintf(stderr, "%s: type = %d\n", __func__, model.type);
|
||||
}
|
||||
|
||||
// load vocab
|
||||
{
|
||||
std::string word;
|
||||
vocab.id_to_token.resize(model.hparams.n_vocab);
|
||||
std::vector<char> tmp(64);
|
||||
|
||||
int32_t vocabloops = model.hparams.n_vocab;
|
||||
if(vocabloops==32001 && legacy_file_format)
|
||||
{
|
||||
printf("---\n!! WARNING: Model appears to be GPT4ALL v1 model, triggering compatibility fix !!\n---\n");
|
||||
vocabloops -= 1;
|
||||
}
|
||||
for (int i = 0; i < vocabloops; i++) {
|
||||
uint32_t len;
|
||||
fin.read((char *) &len, sizeof(len));
|
||||
|
||||
word.resize(len);
|
||||
if (len > 0) {
|
||||
tmp.resize(len);
|
||||
fin.read(tmp.data(), len);
|
||||
word.assign(tmp.data(), len);
|
||||
} else {
|
||||
word.clear();
|
||||
}
|
||||
|
||||
float score;
|
||||
if(!legacy_file_format)
|
||||
{
|
||||
fin.read((char *) &score, sizeof(score));
|
||||
}
|
||||
|
||||
vocab.token_to_id[word] = i;
|
||||
|
||||
auto &tok_score = vocab.id_to_token[i];
|
||||
tok_score.tok = word;
|
||||
tok_score.score = score;
|
||||
}
|
||||
}
|
||||
|
||||
if (vocab_only) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// for the big tensors, we have the option to store the data in 16-bit floats or quantized
|
||||
// in order to save memory and also to speed up the computation
|
||||
// wtype is for per-layer weights, while vtype is for other weights
|
||||
ggml_type wtype, vtype;
|
||||
switch (model.hparams.f16) {
|
||||
case 0: wtype = vtype = GGML_TYPE_F32; break;
|
||||
case 1: wtype = vtype = GGML_TYPE_F16; break;
|
||||
case 2: wtype = vtype = GGML_TYPE_Q4_0; break;
|
||||
case 3: wtype = vtype = GGML_TYPE_Q4_1; break;
|
||||
case 4: wtype = GGML_TYPE_Q4_1; vtype = GGML_TYPE_F16; break;
|
||||
default:
|
||||
{
|
||||
fprintf(stderr, "%s: invalid model file '%s' (bad f16 value %d)\n",
|
||||
__func__, fname.c_str(), model.hparams.f16);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
auto & ctx = model.ctx;
|
||||
|
||||
size_t ctx_size = 0;
|
||||
|
||||
{
|
||||
const auto & hparams = model.hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_ctx = hparams.n_ctx;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // tok_embeddings
|
||||
|
||||
ctx_size += n_embd*ggml_type_sizef(GGML_TYPE_F32); // norm
|
||||
|
||||
ctx_size += n_embd*n_vocab*ggml_type_sizef(vtype); // output
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // attention_norm
|
||||
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wq
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wk
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wv
|
||||
ctx_size += n_layer*(n_embd*n_embd*ggml_type_sizef(wtype)); // wo
|
||||
|
||||
ctx_size += n_layer*(n_embd*ggml_type_sizef(GGML_TYPE_F32)); // ffn_norm
|
||||
|
||||
ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w1
|
||||
ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w2
|
||||
ctx_size += n_layer*(n_ff*n_embd*ggml_type_sizef(wtype)); // w3
|
||||
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_k
|
||||
ctx_size += n_ctx*n_layer*n_embd*ggml_type_sizef(memory_type); // memory_v
|
||||
|
||||
ctx_size += (5 + 10*n_layer)*256; // object overhead
|
||||
|
||||
fprintf(stderr, "%s: ggml ctx size = %6.2f MB\n", __func__, ctx_size/(1024.0*1024.0));
|
||||
}
|
||||
|
||||
// print memory requirements
|
||||
{
|
||||
const size_t scale = memory_type == GGML_TYPE_F32 ? 2 : 1;
|
||||
|
||||
// this is the total memory required to run the inference
|
||||
const size_t mem_required =
|
||||
ctx_size +
|
||||
MEM_REQ_SCRATCH0.at(model.type) +
|
||||
MEM_REQ_SCRATCH1.at(model.type) +
|
||||
MEM_REQ_EVAL.at (model.type);
|
||||
|
||||
// this is the memory required by one llama_state
|
||||
const size_t mem_required_state =
|
||||
scale*MEM_REQ_KV_SELF.at(model.type);
|
||||
|
||||
fprintf(stderr, "%s: mem required = %7.2f MB (+ %7.2f MB per state)\n", __func__,
|
||||
mem_required / 1024.0 / 1024.0, mem_required_state / 1024.0 / 1024.0);
|
||||
}
|
||||
|
||||
// create the ggml context
|
||||
{
|
||||
lctx.model.buf.resize(ctx_size);
|
||||
|
||||
struct ggml_init_params params = {
|
||||
/*.mem_size =*/ lctx.model.buf.size(),
|
||||
/*.mem_buffer =*/ lctx.model.buf.data(),
|
||||
};
|
||||
|
||||
model.ctx = ggml_init(params);
|
||||
if (!model.ctx) {
|
||||
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// prepare memory for the weights
|
||||
{
|
||||
const auto & hparams = model.hparams;
|
||||
|
||||
const int n_embd = hparams.n_embd;
|
||||
const int n_layer = hparams.n_layer;
|
||||
const int n_vocab = hparams.n_vocab;
|
||||
|
||||
model.layers.resize(n_layer);
|
||||
|
||||
model.tok_embeddings = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);
|
||||
|
||||
model.norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
model.output = ggml_new_tensor_2d(ctx, vtype, n_embd, n_vocab);
|
||||
|
||||
// map by name
|
||||
model.tensors["tok_embeddings.weight"] = model.tok_embeddings;
|
||||
|
||||
model.tensors["norm.weight"] = model.norm;
|
||||
model.tensors["output.weight"] = model.output;
|
||||
|
||||
for (int i = 0; i < n_layer; ++i) {
|
||||
auto & layer = model.layers[i];
|
||||
|
||||
layer.attention_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
layer.wq = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.wk = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.wv = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
layer.wo = ggml_new_tensor_2d(ctx, wtype, n_embd, n_embd);
|
||||
|
||||
layer.ffn_norm = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
layer.w1 = ggml_new_tensor_2d(ctx, wtype, n_embd, n_ff);
|
||||
layer.w2 = ggml_new_tensor_2d(ctx, wtype, n_ff, n_embd);
|
||||
layer.w3 = ggml_new_tensor_2d(ctx, wtype, n_embd, n_ff);
|
||||
|
||||
// map by name
|
||||
model.tensors["layers." + std::to_string(i) + ".attention_norm.weight"] = layer.attention_norm;
|
||||
|
||||
model.tensors["layers." + std::to_string(i) + ".attention.wq.weight"] = layer.wq;
|
||||
model.tensors["layers." + std::to_string(i) + ".attention.wk.weight"] = layer.wk;
|
||||
model.tensors["layers." + std::to_string(i) + ".attention.wv.weight"] = layer.wv;
|
||||
model.tensors["layers." + std::to_string(i) + ".attention.wo.weight"] = layer.wo;
|
||||
|
||||
model.tensors["layers." + std::to_string(i) + ".ffn_norm.weight"] = layer.ffn_norm;
|
||||
|
||||
model.tensors["layers." + std::to_string(i) + ".feed_forward.w1.weight"] = layer.w1;
|
||||
model.tensors["layers." + std::to_string(i) + ".feed_forward.w2.weight"] = layer.w2;
|
||||
model.tensors["layers." + std::to_string(i) + ".feed_forward.w3.weight"] = layer.w3;
|
||||
}
|
||||
}
|
||||
|
||||
const size_t file_offset = fin.tellg();
|
||||
|
||||
fin.close();
|
||||
|
||||
std::vector<uint8_t> tmp;
|
||||
|
||||
if (progress_callback) {
|
||||
progress_callback(0.0, progress_callback_user_data);
|
||||
}
|
||||
|
||||
for (int i = 0; i < n_parts; ++i) {
|
||||
const int part_id = i;
|
||||
//const int part_id = n_parts - i - 1;
|
||||
|
||||
std::string fname_part = fname;
|
||||
if (i > 0) {
|
||||
fname_part += "." + std::to_string(i);
|
||||
}
|
||||
|
||||
fprintf(stderr, "%s: loading model part %d/%d from '%s'\n", __func__, i+1, n_parts, fname_part.c_str());
|
||||
|
||||
fin = std::ifstream(fname_part, std::ios::binary);
|
||||
fin.rdbuf()->pubsetbuf(f_buf.data(), f_buf.size());
|
||||
|
||||
fin.seekg(0, fin.end);
|
||||
const size_t file_size = fin.tellg();
|
||||
|
||||
fin.seekg(file_offset);
|
||||
|
||||
// load weights
|
||||
{
|
||||
size_t total_size = 0;
|
||||
|
||||
model.n_loaded = 0;
|
||||
|
||||
fprintf(stderr, "%s: ", __func__);
|
||||
|
||||
while (true) {
|
||||
int32_t n_dims;
|
||||
int32_t length;
|
||||
int32_t ftype;
|
||||
|
||||
fin.read(reinterpret_cast<char *>(&n_dims), sizeof(n_dims));
|
||||
fin.read(reinterpret_cast<char *>(&length), sizeof(length));
|
||||
fin.read(reinterpret_cast<char *>(&ftype), sizeof(ftype));
|
||||
|
||||
if (fin.eof()) {
|
||||
break;
|
||||
}
|
||||
|
||||
int32_t nelements = 1;
|
||||
int32_t ne[2] = { 1, 1 };
|
||||
for (int i = 0; i < n_dims; ++i) {
|
||||
fin.read(reinterpret_cast<char *>(&ne[i]), sizeof(ne[i]));
|
||||
nelements *= ne[i];
|
||||
}
|
||||
|
||||
std::string name(length, 0);
|
||||
fin.read(&name[0], length);
|
||||
|
||||
if (model.tensors.find(name.data()) == model.tensors.end()) {
|
||||
fprintf(stderr, "%s: unknown tensor '%s' in model file\n", __func__, name.data());
|
||||
return false;
|
||||
}
|
||||
|
||||
// split_type = 0: split by columns
|
||||
// split_type = 1: split by rows
|
||||
int split_type = 0;
|
||||
|
||||
// split_type = 0:
|
||||
// regex:
|
||||
// - tok_embeddings.*
|
||||
// - layers.*.attention.wo.weight
|
||||
// - layers.*.feed_forward.w2.weight
|
||||
|
||||
// split_type = 1:
|
||||
// regex:
|
||||
// - output.*
|
||||
// - layers.*.attention.wq.weight
|
||||
// - layers.*.attention.wk.weight
|
||||
// - layers.*.attention.wv.weight
|
||||
// - layers.*.feed_forward.w1.weight
|
||||
// - layers.*.feed_forward.w3.weight
|
||||
if (name.find("tok_embeddings") != std::string::npos) {
|
||||
split_type = 0;
|
||||
} else if (name.find("layers") != std::string::npos) {
|
||||
if (name.find("attention.wo.weight") != std::string::npos) {
|
||||
split_type = 0;
|
||||
} else if (name.find("feed_forward.w2.weight") != std::string::npos) {
|
||||
split_type = 0;
|
||||
} else {
|
||||
split_type = 1;
|
||||
}
|
||||
} else if (name.find("output") != std::string::npos) {
|
||||
split_type = 1;
|
||||
}
|
||||
|
||||
auto tensor = model.tensors[name.data()];
|
||||
|
||||
if (n_dims == 1) {
|
||||
if (ggml_nelements(tensor) != nelements) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (ggml_nelements(tensor)/n_parts != nelements) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file\n", __func__, name.data());
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (n_dims == 1) {
|
||||
if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
|
||||
__func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (split_type == 0) {
|
||||
if (tensor->ne[0]/n_parts != ne[0] || tensor->ne[1] != ne[1]) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
|
||||
__func__, name.data(), tensor->ne[0]/n_parts, tensor->ne[1], ne[0], ne[1]);
|
||||
return false;
|
||||
}
|
||||
} else {
|
||||
if (tensor->ne[0] != ne[0] || tensor->ne[1]/n_parts != ne[1]) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
|
||||
__func__, name.data(), tensor->ne[0], tensor->ne[1]/n_parts, ne[0], ne[1]);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (0) {
|
||||
static const char * ftype_str[] = { "f32", "f16", "q4_0", "q4_1", };
|
||||
fprintf(stderr, "%24s - [%5d, %5d], type = %6s, split = %d\n", name.data(), ne[0], ne[1], ftype_str[ftype], split_type);
|
||||
}
|
||||
|
||||
size_t bpe = 0;
|
||||
|
||||
switch (ftype) {
|
||||
case 0: bpe = ggml_type_size(GGML_TYPE_F32); break;
|
||||
case 1: bpe = ggml_type_size(GGML_TYPE_F16); break;
|
||||
case 2: bpe = ggml_type_size(GGML_TYPE_Q4_0); assert(ne[0] % 64 == 0); break;
|
||||
case 3: bpe = ggml_type_size(GGML_TYPE_Q4_1); assert(ne[0] % 64 == 0); break;
|
||||
default:
|
||||
{
|
||||
fprintf(stderr, "%s: unknown ftype %d in model file\n", __func__, ftype);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
|
||||
if (n_dims == 1 || n_parts == 1) {
|
||||
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
|
||||
__func__, name.data(), ggml_nbytes(tensor), nelements*bpe);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (part_id == 0) {
|
||||
fin.read(reinterpret_cast<char *>(tensor->data), ggml_nbytes(tensor));
|
||||
} else {
|
||||
fin.seekg(ggml_nbytes(tensor), std::ios::cur);
|
||||
}
|
||||
|
||||
total_size += ggml_nbytes(tensor);
|
||||
} else {
|
||||
if ((nelements*bpe)/ggml_blck_size(tensor->type) != ggml_nbytes(tensor)/n_parts) {
|
||||
fprintf(stderr, "%s: tensor '%s' has wrong size in model file: got %zu, expected %zu\n",
|
||||
__func__, name.data(), ggml_nbytes(tensor)/n_parts, nelements*bpe);
|
||||
return false;
|
||||
}
|
||||
|
||||
if (split_type == 0) {
|
||||
const int np0 = ne[0];
|
||||
|
||||
const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
|
||||
assert(row_size == tensor->nb[1]);
|
||||
|
||||
for (int i1 = 0; i1 < ne[1]; ++i1) {
|
||||
const size_t offset_row = i1*row_size;
|
||||
const size_t offset = offset_row + ((part_id*np0)/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
|
||||
fin.read(reinterpret_cast<char *>(tensor->data) + offset, row_size/n_parts);
|
||||
}
|
||||
} else {
|
||||
const int np1 = ne[1];
|
||||
|
||||
const size_t row_size = (tensor->ne[0]/ggml_blck_size(tensor->type))*ggml_type_size(tensor->type);
|
||||
|
||||
for (int i1 = 0; i1 < ne[1]; ++i1) {
|
||||
const size_t offset_row = (i1 + part_id*np1)*row_size;
|
||||
fin.read(reinterpret_cast<char *>(tensor->data) + offset_row, row_size);
|
||||
}
|
||||
}
|
||||
|
||||
total_size += ggml_nbytes(tensor)/n_parts;
|
||||
}
|
||||
|
||||
//fprintf(stderr, "%42s - [%5d, %5d], type = %6s, %6.2f MB\n", name.data(), ne[0], ne[1], ftype == 0 ? "float" : "f16", ggml_nbytes(tensor)/1024.0/1024.0);
|
||||
model.n_loaded++;
|
||||
|
||||
// progress
|
||||
if (progress_callback) {
|
||||
float current_file_progress = float(size_t(fin.tellg()) - file_offset) / float(file_size - file_offset);
|
||||
float current_progress = (float(i) + current_file_progress) / float(n_parts);
|
||||
progress_callback(current_progress, progress_callback_user_data);
|
||||
}
|
||||
if (model.n_loaded % 8 == 0) {
|
||||
fprintf(stderr, ".");
|
||||
fflush(stderr);
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr, " done\n");
|
||||
|
||||
fprintf(stderr, "%s: model size = %8.2f MB / num tensors = %d\n", __func__, total_size/1024.0/1024.0, model.n_loaded);
|
||||
if (model.n_loaded == 0) {
|
||||
fprintf(stderr, "%s: WARN no tensors loaded from model file - assuming empty model for testing\n", __func__);
|
||||
} else if (model.n_loaded != (int) model.tensors.size()) {
|
||||
fprintf(stderr, "%s: ERROR not all tensors loaded from model file - expected %zu, got %d\n", __func__, model.tensors.size(), model.n_loaded);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
fin.close();
|
||||
}
|
||||
|
||||
lctx.t_load_us = ggml_time_us() - t_start_us;
|
||||
|
||||
if (progress_callback) {
|
||||
progress_callback(1.0, progress_callback_user_data);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// TODO: Calculate this constant from the vocabulary
|
||||
#define MAX_TOKEN_LEN 18
|
||||
// SentencePiece implementation after https://guillaume-be.github.io/2020-05-30/sentence_piece
|
||||
|
|
|
(next changed file; name not shown)

@@ -17,6 +17,4 @@


std::vector<llama_token> legacy_llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos);
static bool legacy_llama_model_load(const std::string & fname, llama_context & lctx, int n_ctx, int n_parts, ggml_type memory_type, bool vocab_only, llama_progress_callback progress_callback, void *progress_callback_user_data);
struct llama_context * legacy_llama_init_from_file(const char * path_model, struct llama_context_params params);
std::vector<llama_token> legacy_llama_tokenize(struct llama_context * ctx, const std::string & text, bool add_bos);