add some info in loading

This commit is contained in:
Liu Ming 2023-05-26 16:18:49 +08:00
parent 4905366fe8
commit 991002b28e
2 changed files with 8 additions and 4 deletions

View file

@@ -1,16 +1,17 @@
 set(TARGET grpc-server)
 set(_PROTOBUF_LIBPROTOBUF libprotobuf)
 set(_REFLECTION grpc++_reflection)
+include_directories($ENV{MY_INSTALL_DIR}/include)
-find_package(absl REQUIRED PATHS $ENV{MY_INSTALL_DIR}/lib)
-find_package(Protobuf CONFIG REQUIRED PATHS $ENV{MY_INSTALL_DIR}/lib)
+find_package(absl REQUIRED)
+find_package(Protobuf CONFIG REQUIRED)
 find_package(gRPC CONFIG REQUIRED)
 find_program(_PROTOBUF_PROTOC protoc)
 set(_GRPC_GRPCPP grpc++)
 find_program(_GRPC_CPP_PLUGIN_EXECUTABLE grpc_cpp_plugin)
 include_directories(${CMAKE_CURRENT_BINARY_DIR})
+include_directories(${Protobuf_INCLUDE_DIRS})
-message(STATUS "Using protobuf ${Protobuf_VERSION} ${CMAKE_CURRENT_BINARY_DIR} $ENV{MY_INSTALL_DIR}/include")
+message(STATUS "Using protobuf ${Protobuf_VERSION} ${Protobuf_INCLUDE_DIRS} ${CMAKE_CURRENT_BINARY_DIR}")
 # Proto file

View file

@@ -108,6 +108,7 @@ public:
 ctx_for_embedding = llama_init_from_gpt_params(params);
 }
 params.embedding = false;
+fprintf(stderr, "%s: loading model\n", __func__);
 ctx = llama_init_from_gpt_params(params);
 if (ctx == NULL || (has_embedding && ctx_for_embedding == NULL))
 {
@@ -116,6 +117,7 @@ public:
 }
 else
 {
+fprintf(stderr, "%s: model loaded\n", __func__);
 loaded = true;
 last_n_tokens.resize(params.n_ctx);
 std::fill(last_n_tokens.begin(), last_n_tokens.end(), 0);
@@ -749,6 +751,7 @@ int main(int argc, char **argv)
 // load the model
 if (!llama.loaded)
 {
+fprintf(stderr, "error: failed to load model\n");
 return 1;
 }