mpi : add support for distributed inference via MPI (#2099)
* MPI support, first cut
* fix warnings, update README
* fixes
* wrap includes
* PR comments
* Update CMakeLists.txt
* Add GH workflow, fix test
* Add info to README
* mpi : trying to move more MPI stuff into ggml-mpi (WIP) (#2099)
* mpi : add names for layer inputs + prep ggml_mpi_graph_compute()
* mpi : move all MPI logic into ggml-mpi (not tested yet)
* mpi : various fixes - communication now works but results are wrong
* mpi : fix output tensor after MPI compute (still not working)
* mpi : fix inference
* mpi : minor
* Add OpenMPI to GH action
* [mpi] continue-on-error: true
* mpi : fix after master merge
* [mpi] Link MPI C++ libraries to fix OpenMPI
* tests : fix new llama_backend API
* [mpi] use MPI_INT32_T
* mpi : factor out recv / send in functions and reuse
* mpi : extend API to allow usage with outer backends (e.g. Metal)

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
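The bullets above describe splitting the model's layers across MPI ranks and factoring the send/receive of intermediate results into reusable helpers. What follows is only an illustrative sketch of that pipeline pattern in plain MPI, not the PR's actual ggml-mpi code; the helper names, message tag, and hidden-state width are hypothetical.

// Illustrative sketch of pipeline-split inference over MPI; not the PR's
// actual ggml-mpi implementation. Helper names, the message tag, and the
// hidden-state width are hypothetical.
#include <mpi.h>
#include <cstdint>
#include <vector>

// Send the element count first, then the payload, to the destination rank.
static void send_tensor(const float * data, int32_t n_elements, int dst_rank) {
    MPI_Send(&n_elements, 1, MPI_INT32_T, dst_rank, 0, MPI_COMM_WORLD);
    MPI_Send(data, n_elements, MPI_FLOAT, dst_rank, 0, MPI_COMM_WORLD);
}

// Receive the element count, then the payload, from the source rank.
static std::vector<float> recv_tensor(int src_rank) {
    int32_t n_elements = 0;
    MPI_Recv(&n_elements, 1, MPI_INT32_T, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    std::vector<float> data(n_elements);
    MPI_Recv(data.data(), n_elements, MPI_FLOAT, src_rank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    return data;
}

int main(int argc, char ** argv) {
    MPI_Init(&argc, &argv);
    int rank = 0, size = 1;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    std::vector<float> hidden(4096, 0.0f); // hypothetical hidden-state width
    if (rank > 0) {
        hidden = recv_tensor(rank - 1);    // activations from the previous stage
    }
    // ... evaluate this rank's slice of layers over `hidden` ...
    if (rank + 1 < size) {
        send_tensor(hidden.data(), (int32_t) hidden.size(), rank + 1);
    }

    MPI_Finalize();
    return 0;
}

Launched with an MPI runner, each rank evaluates its own slice of layers and forwards the hidden state to the next rank, so only the last rank ends up holding the logits.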
parent 1d16309969
commit 5656d10599

18 changed files with 460 additions and 35 deletions
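Every example touched by the diff below follows the same two-part pattern: the renamed llama_backend_init() replaces llama_init_backend(), and a matching llama_backend_free() is added before the program returns so that backend state (including MPI, when enabled) is torn down. A minimal sketch of the resulting lifecycle, assuming the llama.h model-loading API of this era; the model path is a placeholder:

// Sketch of the init/free pairing introduced here; not part of the commit.
#include "llama.h"

int main() {
    llama_backend_init(false);  // renamed from llama_init_backend(); false = no NUMA

    llama_context_params params = llama_context_default_params();
    llama_model * model = llama_load_model_from_file("models/7B/ggml-model.bin", params);
    if (model == NULL) {
        return 1;
    }
    llama_context * ctx = llama_new_context_with_model(model, params);

    // ... tokenize, evaluate, sample ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();       // new in this commit: releases backend (and MPI) state
    return 0;
}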
@@ -34,7 +34,7 @@ struct MyModel* create_mymodel(int argc, char ** argv) {
     }
     fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);
 
-    llama_init_backend(params.numa);
+    llama_backend_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;

@@ -35,7 +35,7 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }
 
-    llama_init_backend(params.numa);
+    llama_backend_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -93,5 +93,7 @@ int main(int argc, char ** argv) {
     llama_free(ctx);
     llama_free_model(model);
 
+    llama_backend_free();
+
     return 0;
 }

@@ -105,7 +105,7 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }
 
-    llama_init_backend(params.numa);
+    llama_backend_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -671,5 +671,7 @@ int main(int argc, char ** argv) {
     llama_free(ctx);
     llama_free_model(model);
 
+    llama_backend_free();
+
     return 0;
 }

@@ -147,7 +147,7 @@ int main(int argc, char ** argv) {
         params.prompt = gpt_random_prompt(rng);
     }
 
-    llama_init_backend(params.numa);
+    llama_backend_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -172,5 +172,7 @@ int main(int argc, char ** argv) {
     llama_free(ctx);
     llama_free_model(model);
 
+    llama_backend_free();
+
     return 0;
 }

@@ -180,7 +180,7 @@ int main(int argc, char ** argv) {
         usage(argv[0]);
     }
 
-    llama_init_backend(false);
+    llama_backend_init(false);
 
     // parse command line arguments
     const std::string fname_inp = argv[arg_idx];
@@ -257,5 +257,7 @@ int main(int argc, char ** argv) {
         printf("%s: total time = %8.2f ms\n", __func__, (t_main_end_us - t_main_start_us)/1000.0);
     }
 
+    llama_backend_free();
+
     return 0;
 }

@@ -1079,7 +1079,7 @@ int main(int argc, char **argv)
         params.model_alias = params.model;
     }
 
-    llama_init_backend(params.numa);
+    llama_backend_init(params.numa);
 
     LOG_INFO("build info", {{"build", BUILD_NUMBER},
                             {"commit", BUILD_COMMIT}});
@@ -1309,5 +1309,7 @@ int main(int argc, char **argv)
         return 1;
     }
 
+    llama_backend_free();
+
     return 0;
 }

@@ -66,7 +66,7 @@ int main(int argc, char ** argv)
     // Init LLM :
     //---------------------------------
 
-    llama_init_backend(params.numa);
+    llama_backend_init(params.numa);
 
     llama_model * model;
     llama_context * ctx;
@@ -173,6 +173,8 @@ int main(int argc, char ** argv)
     llama_free( ctx );
     llama_free_model( model );
 
+    llama_backend_free();
+
     return 0;
 }