Loop in llama.cpp, fixed progress callback

JohannesGaessler 2023-05-20 13:42:19 +02:00
parent fee87f6558
commit b81f662e9d
3 changed files with 37 additions and 32 deletions
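
In short, this change moves the per-tensor loop out of ggml_cuda_load_data() (which now loads a single tensor from a file offset) and into llama_model_load_internal() in llama.cpp, which loads each CUDA-backed tensor individually and reports fractional progress along the way; the final 1.0f is now reported once, after the CUDA tensors are loaded, instead of inside llama_model_loader::load_all_data(). A minimal, self-contained sketch of the progress-accounting pattern (fake_tensor, load_gpu_tensor, and load_with_progress are illustrative stand-ins, not llama.cpp symbols):

// Sketch only: mirrors the progress accounting introduced by this commit.
#include <cstddef>
#include <vector>

typedef void (*progress_fn)(float progress, void * user_data);

struct fake_tensor {
    size_t size;    // bytes, like llama_load_tensor::size
    bool   on_gpu;  // stand-in for backend == GGML_BACKEND_CUDA
};

static void load_gpu_tensor(const fake_tensor &) {
    // stand-in for ggml_cuda_load_data(fname, tensor, offset)
}

static void load_with_progress(const std::vector<fake_tensor> & tensors,
                               progress_fn progress_callback, void * user_data) {
    // CPU-backed tensors were already loaded elsewhere, so they start out as "done".
    size_t done_size = 0;
    size_t data_size = 0;
    for (const fake_tensor & t : tensors) {
        data_size += t.size;
        if (!t.on_gpu) {
            done_size += t.size;
        }
    }

    // Load GPU tensors one at a time, reporting fractional progress before each.
    for (const fake_tensor & t : tensors) {
        if (!t.on_gpu) {
            continue;
        }
        if (progress_callback) {
            progress_callback((float) done_size / data_size, user_data);
        }
        load_gpu_tensor(t);
        done_size += t.size;
    }

    // 1.0f is reported exactly once, after every backend has finished loading.
    if (progress_callback) {
        progress_callback(1.0f, user_data);
    }
}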

--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu

@@ -894,13 +894,10 @@ void ggml_cuda_transform_tensor(ggml_tensor * tensor) {
     tensor->backend = GGML_BACKEND_CUDA;
 }
 
-void ggml_cuda_load_data(const char * fname, struct ggml_tensor ** tensors, const int num_tensors, const size_t * offsets) {
+void ggml_cuda_load_data(const char * fname, struct ggml_tensor * tensor, const size_t offset) {
     FILE * fp = fopen(fname, "rb");
 
-    for (int i = 0; i < num_tensors; ++i) {
-        ggml_tensor * tensor = tensors[i];
-        const size_t size = ggml_nbytes(tensor);
-        const size_t offset = offsets[i];
+    const size_t size = ggml_nbytes(tensor);
 
-        void * buf;
-        CUDA_CHECK(cudaMalloc(&buf, size));
+    void * buf;
+    CUDA_CHECK(cudaMalloc(&buf, size));
@@ -925,4 +922,3 @@ void ggml_cuda_load_data(const char * fname, struct ggml_tensor ** tensors, const int num_tensors, const size_t * offsets) {
-        tensor->data = buf;
-        free(buf_host);
-    }
-}
+    tensor->data = buf;
+    free(buf_host);
+}

--- a/ggml-cuda.h
+++ b/ggml-cuda.h

@@ -16,7 +16,7 @@ void * ggml_cuda_host_malloc(size_t size);
 void ggml_cuda_host_free(void * ptr);
 
 void ggml_cuda_transform_tensor(struct ggml_tensor * tensor);
-void ggml_cuda_load_data(const char * fname, struct ggml_tensor ** tensors, int num_tensors, const size_t * offsets);
+void ggml_cuda_load_data(const char * fname, struct ggml_tensor * tensors, size_t offset);
 
 #ifdef __cplusplus
 }

--- a/llama.cpp
+++ b/llama.cpp

@@ -1,6 +1,7 @@
 // Defines fileno on msys:
 #ifndef _GNU_SOURCE
 #define _GNU_SOURCE
+#include <cstddef>
 #include <cstdint>
 #include <cstdio>
 #endif
@@ -720,9 +721,6 @@ struct llama_model_loader {
                 lmlock->grow_to(done_size);
             }
         }
-        if (progress_callback) {
-            progress_callback(1.0f, progress_callback_user_data);
-        }
     }
 
     void load_data_for(llama_load_tensor & lt) {
@@ -1104,20 +1102,31 @@ static void llama_model_load_internal(
 #ifdef GGML_USE_CUBLAS
     {
-        std::vector<struct ggml_tensor *> tensors;
-        std::vector<size_t> offsets;
-
+        size_t done_size = 0;
+        size_t data_size = 0;
+        for (llama_load_tensor & lt : ml->tensors_map.tensors) {
+            data_size += lt.size;
+            if (lt.ggml_tensor->backend == GGML_BACKEND_CPU) {
+                done_size += lt.size;
+            }
+        }
+
         for (llama_load_tensor & lt : ml->tensors_map.tensors) {
             if (lt.ggml_tensor->backend != GGML_BACKEND_CUDA) {
                 continue;
             }
-            tensors.emplace_back(lt.ggml_tensor);
-            LLAMA_ASSERT(lt.shards.size() == 1);
-            offsets.emplace_back(lt.shards.at(0).file_off);
+            if (progress_callback) {
+                progress_callback((float) done_size / data_size, progress_callback_user_data);
+            }
+            ggml_cuda_load_data(fname.c_str(), lt.ggml_tensor, lt.shards.at(0).file_off);
+            done_size += lt.size;
         }
-        ggml_cuda_load_data(fname.c_str(), tensors.data(), tensors.size(), offsets.data());
     }
 #endif // GGML_USE_CUBLAS
 
+    if (progress_callback) {
+        progress_callback(1.0f, progress_callback_user_data);
+    }
+
     model.mapping = std::move(ml->mapping);
 
     // loading time will be recalculate after the first eval, so