minor: indentation + assert

This commit is contained in:
Georgi Gerganov 2023-08-14 14:10:21 +03:00
parent f4a0e0ec5a
commit 797088a7cd
No known key found for this signature in database
GPG key ID: 449E073F9DC10735
3 changed files with 101 additions and 91 deletions

View file

@@ -8,6 +8,12 @@
#include <sstream> #include <sstream>
#include <fstream> #include <fstream>
#include <vector> #include <vector>
#undef MIN
#undef MAX
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
/* /*
template<typename T> template<typename T>
static std::string to_string(const T & val) { static std::string to_string(const T & val) {
@@ -16,6 +22,7 @@ static std::string to_string(const T & val) {
return ss.str(); return ss.str();
} }
*/ */
void gguf_ex_write_str(std::ofstream & fout, const std::string & val) { void gguf_ex_write_str(std::ofstream & fout, const std::string & val) {
const int32_t n = val.size(); const int32_t n = val.size();
fout.write((const char *) &n, sizeof(n)); fout.write((const char *) &n, sizeof(n));
@@ -377,11 +384,13 @@ bool gguf_ex_read_2(const std::string & fname) {
struct gguf_file file(fname.c_str(), "rb"); struct gguf_file file(fname.c_str(), "rb");
gguf_mmap data_mmap(&file, 0, false); gguf_mmap data_mmap(&file, 0, false);
const int n_tensors = gguf_get_n_tensors(ctx); const int n_tensors = gguf_get_n_tensors(ctx);
for (int i = 0; i < n_tensors; ++i) { for (int i = 0; i < n_tensors; ++i) {
const char * name = gguf_get_tensor_name(ctx, i); const char * name = gguf_get_tensor_name(ctx, i);
const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i); const size_t offset = gguf_get_data_offset(ctx) + gguf_get_tensor_offset(ctx, i);
struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
cur->data = static_cast<char *>(data_mmap.addr) + offset; cur->data = static_cast<char *>(data_mmap.addr) + offset;
@@ -390,11 +399,9 @@ bool gguf_ex_read_2(const std::string & fname) {
const float * data = (const float *) cur->data; const float * data = (const float *) cur->data;
printf("%s data[:10] : ", name); printf("%s data[:10] : ", name);
for (int j = 0; j < MIN(10, ggml_nelements(cur)); ++j) {
for (int j = 0; j < 10; ++j) {
printf("%f ", data[j]); printf("%f ", data[j]);
} }
printf("\n\n"); printf("\n\n");
} }

View file

@@ -508,16 +508,15 @@ struct gguf_load_tensors_map {
enum gguf_file_version { enum gguf_file_version {
GGUF_FILE_VERSION_V1 = 1, GGUF_FILE_VERSION_V1 = 1,
}; };
struct gguf_file_loader { struct gguf_file_loader {
gguf_file file; gguf_file file;
gguf_context * gguf_ctx; gguf_context * gguf_ctx;
gguf_file_version file_version; gguf_file_version file_version;
llama_hparams hparams; llama_hparams hparams;
llama_vocab vocab; llama_vocab vocab;
struct ggml_context * ctx_data = NULL; struct ggml_context * ctx_data = NULL;
gguf_file_loader(const char * fname, gguf_load_tensors_map & tensors_map) gguf_file_loader(const char * fname, gguf_load_tensors_map & tensors_map)
@@ -537,7 +536,7 @@ struct ggml_context * ctx_data = NULL;
read_tensor_metadata(tensors_map); read_tensor_metadata(tensors_map);
} }
uint32_t read_u32(const char * key) { uint32_t read_u32(const char * key) const {
int i = gguf_find_key(gguf_ctx, key); int i = gguf_find_key(gguf_ctx, key);
if (i == -1) { if (i == -1) {
throw std::runtime_error(format("cannot find param with key %s\n", key)); throw std::runtime_error(format("cannot find param with key %s\n", key));
@@ -546,7 +545,7 @@ struct ggml_context * ctx_data = NULL;
return gguf_get_val_u32(gguf_ctx, i); return gguf_get_val_u32(gguf_ctx, i);
} }
float read_f32(const char * key) { float read_f32(const char * key) const {
int i = gguf_find_key(gguf_ctx, key); int i = gguf_find_key(gguf_ctx, key);
if (i == -1) { if (i == -1) {
throw std::runtime_error(format("cannot find param with key %s\n", key)); throw std::runtime_error(format("cannot find param with key %s\n", key));
@@ -555,7 +554,7 @@ struct ggml_context * ctx_data = NULL;
return gguf_get_val_f32(gguf_ctx, i); return gguf_get_val_f32(gguf_ctx, i);
} }
int read_n_vocab() { int read_n_vocab() const {
int i = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens"); int i = gguf_find_key(gguf_ctx, "tokenizer.ggml.tokens");
if (i == -1) { if (i == -1) {
throw std::runtime_error("cannot find token list in GGUF file\n"); throw std::runtime_error("cannot find token list in GGUF file\n");
@@ -565,7 +564,6 @@ struct ggml_context * ctx_data = NULL;
} }
void read_hparams() { void read_hparams() {
// TODO define keys as constants in header // TODO define keys as constants in header
// TODO: read all hparams from file // TODO: read all hparams from file
@@ -606,7 +604,7 @@ struct ggml_context * ctx_data = NULL;
} }
} }
void read_tensor_metadata(gguf_load_tensors_map & tensors_map) { void read_tensor_metadata(gguf_load_tensors_map & tensors_map) const {
const int n_tensors = gguf_get_n_tensors(gguf_ctx); const int n_tensors = gguf_get_n_tensors(gguf_ctx);
for (int i = 0; i < n_tensors; ++i) { for (int i = 0; i < n_tensors; ++i) {
@@ -614,9 +612,11 @@ struct ggml_context * ctx_data = NULL;
const char * name = gguf_get_tensor_name(gguf_ctx, i); const char * name = gguf_get_tensor_name(gguf_ctx, i);
struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name); struct ggml_tensor * cur = ggml_get_tensor(ctx_data, name);
uint32_t n_dims = cur->n_dims;
const uint32_t n_dims = cur->n_dims;
tensor.type = cur->type; tensor.type = cur->type;
tensor.ne.resize(n_dims); tensor.ne.resize(n_dims);
for (uint32_t j = 0; j < n_dims; ++j) { for (uint32_t j = 0; j < n_dims; ++j) {
tensor.ne[j] = cur->ne[j]; tensor.ne[j] = cur->ne[j];
} }
@@ -624,6 +624,7 @@ struct ggml_context * ctx_data = NULL;
if (n_dims < 1 || n_dims > 2) { if (n_dims < 1 || n_dims > 2) {
throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name, n_dims)); throw std::runtime_error(format("llama.cpp: tensor '%s' should not be %u-dimensional", name, n_dims));
} }
switch (tensor.type) { switch (tensor.type) {
case GGML_TYPE_F32: case GGML_TYPE_F32:
case GGML_TYPE_F16: case GGML_TYPE_F16:
@@ -643,7 +644,6 @@ struct ggml_context * ctx_data = NULL;
} }
} }
tensor.file_off = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, i); tensor.file_off = gguf_get_data_offset(gguf_ctx) + gguf_get_tensor_offset(gguf_ctx, i);
tensor.name = name; tensor.name = name;
@@ -787,6 +787,9 @@ struct gguf_file_saver {
} }
info_offset = file.tell(); info_offset = file.tell();
GGML_ASSERT(gguf_get_data_offset(fl->gguf_ctx) >= info_offset);
size_t count = gguf_get_data_offset(fl->gguf_ctx) - info_offset; size_t count = gguf_get_data_offset(fl->gguf_ctx) - info_offset;
file.write_zeros(count); file.write_zeros(count);
file.seek(info_offset, SEEK_SET); file.seek(info_offset, SEEK_SET);