Merge pull request #7 from NexaAI/master

Remove C++20 code and support Microsoft Visual Studio compilation
Zack Li 2024-11-04 22:00:30 -08:00 committed by GitHub
commit d6c0627d31
5 changed files with 54 additions and 39 deletions

View file

@@ -11,8 +11,17 @@
 #include <variant>
 #include <cmath>
-#include <cxxabi.h>
-#define NEXA_CLASS_NAME (abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr))
+// Replace the cxxabi.h include and NEXA_CLASS_NAME definition with cross-platform version
+#ifdef _MSC_VER
+// Windows/MSVC version
+#include <typeinfo>
+#define NEXA_CLASS_NAME (typeid(*this).name())
+#else
+// Unix/GCC/Clang version
+#include <cxxabi.h>
+#define NEXA_CLASS_NAME (abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr))
+#endif
 #define NEXA_LOG(fmt, ...) fprintf(stderr, "%s::%s: " fmt "\n", NEXA_CLASS_NAME, __func__, ##__VA_ARGS__)
 // Prints the content of a ggml_tensor with specified precision. Can use the backend if available.
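
A note on the Unix branch of this macro: abi::__cxa_demangle returns a buffer allocated with malloc that the caller is expected to free, so every NEXA_LOG call on GCC/Clang leaks a small allocation. On MSVC, typeid(*this).name() already returns a readable (if decorated) name, so no demangling is needed. Below is a minimal leak-free sketch; the helper name nexa_class_name and the use of std::string are assumptions for illustration, not part of this commit.

    // Hypothetical leak-free alternative (a sketch, not this commit's code).
    // __cxa_demangle returns a malloc'd buffer that the caller must free.
    #include <cstdlib>
    #include <string>
    #include <typeinfo>
    #ifndef _MSC_VER
    #include <cxxabi.h>
    #endif

    static std::string nexa_class_name(const std::type_info & ti) {
    #ifdef _MSC_VER
        return ti.name();  // MSVC's typeid().name() is already human-readable
    #else
        int status = 0;
        char * demangled = abi::__cxa_demangle(ti.name(), nullptr, nullptr, &status);
        std::string result = (status == 0 && demangled != nullptr) ? demangled : ti.name();
        std::free(demangled);  // free(nullptr) is a no-op, so this is always safe
        return result;
    #endif
    }

NEXA_CLASS_NAME could then expand to nexa_class_name(typeid(*this)).c_str(); the temporary string lives until the end of the enclosing fprintf expression, so the pointer stays valid for the duration of the NEXA_LOG call.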

View file

@@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &param
 omni_context_params omni_context_default_params()
 {
-    omni_context_params params = {
-        .model = "",
-        .mmproj = "",
-        .file = "",
-        .prompt = "this conversation talks about",
-        .n_gpu_layers = -1,
-    };
+    omni_context_params params;
+    params.model = "";
+    params.mmproj = "";
+    params.file = "";
+    params.prompt = "this conversation talks about";
+    params.n_gpu_layers = -1;
     return params;
 }
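
The designated-initializer syntax removed above is a C++20 feature in C++ (MSVC only accepts it under /std:c++20), which is what blocked the Visual Studio build. An alternative that still keeps all defaults in one place without requiring C++20 is default member initializers, available since C++11. The sketch below is an illustration with assumed field types, not the struct's actual definition:

    // Sketch using C++11 default member initializers (field types assumed):
    #include <string>

    struct omni_context_params_sketch {
        std::string model        = "";
        std::string mmproj       = "";
        std::string file         = "";
        std::string prompt       = "this conversation talks about";
        int         n_gpu_layers = -1;
    };

    omni_context_params_sketch default_params_sketch() {
        return {};  // aggregate initialization picks up each default member initializer
    }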
@@ -565,17 +564,16 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
 static omni_params get_omni_params_from_context_params(omni_context_params &params)
 {
-    omni_params all_params = {
-        .gpt = {
-            .n_gpu_layers = params.n_gpu_layers,
-            .model = params.model,
-            .prompt = params.prompt,
-        },
-        .whisper = {
-            .model = params.mmproj,
-            .fname_inp = {params.file},
-        },
-    };
+    omni_params all_params;
+
+    // Initialize gpt params
+    all_params.gpt.n_gpu_layers = params.n_gpu_layers;
+    all_params.gpt.model = params.model;
+    all_params.gpt.prompt = params.prompt;
+
+    // Initialize whisper params
+    all_params.whisper.model = params.mmproj;
+    all_params.whisper.fname_inp = {params.file};
     if (all_params.gpt.n_threads <= 0)
     {
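
One behavioral nuance of this rewrite: the old aggregate initializer value-initialized every member it did not mention (zeroing plain ints), while `omni_params all_params;` default-initializes, which leaves members of plain aggregates indeterminate. This is presumably safe here because the underlying param structs carry their own default member initializers (llama.cpp's gpt_params does, including n_threads, which the check below reads). A self-contained sketch of the difference, using a hypothetical type:

    // Sketch: value-initialization vs default-initialization (hypothetical type):
    struct pod_params { int n_threads; };  // plain aggregate, no defaults

    void init_demo() {
        pod_params a = {};  // value-initialized: a.n_threads == 0
        pod_params b;       // default-initialized: b.n_threads is indeterminate,
                            // so reading it (as "n_threads <= 0" would) is UB here
        (void)a;
        (void)b;
    }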

View file

@@ -60,6 +60,11 @@
 #include <functional>
 #include <codecvt>
+
+#ifdef _WIN32
+#include <io.h>
+#include <fcntl.h>
+#endif
 // third-party utilities
 // use your favorite implementations
 #define DR_WAV_IMPLEMENTATION
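
These Windows-only includes exist because stdio streams on Windows default to text mode, which translates "\r\n" sequences and treats Ctrl-Z as end-of-file, corrupting binary input such as WAV audio read from stdin. The second copy of this hunk further down annotates the includes with _setmode and _O_BINARY, the standard fix; a minimal sketch of how they are typically used:

    // Sketch: switch stdin to binary mode on Windows before reading WAV bytes.
    #include <cstdio>
    #ifdef _WIN32
    #include <io.h>     // _setmode, _fileno
    #include <fcntl.h>  // _O_BINARY
    #endif

    static void set_stdin_binary() {
    #ifdef _WIN32
        _setmode(_fileno(stdin), _O_BINARY);  // disable CRLF/Ctrl-Z translation
    #endif
    }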

View file

@@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &param
 omni_context_params omni_context_default_params()
 {
-    omni_context_params params = {
-        .model = "",
-        .mmproj = "",
-        .file = "",
-        .prompt = "this conversation talks about",
-        .n_gpu_layers = -1,
-    };
+    omni_context_params params;
+    params.model = "";
+    params.mmproj = "";
+    params.file = "";
+    params.prompt = "this conversation talks about";
+    params.n_gpu_layers = -1;
     return params;
 }
@@ -565,18 +564,17 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
 static omni_params get_omni_params_from_context_params(omni_context_params &params)
 {
-    omni_params all_params = {
-        .gpt = {
-            .n_gpu_layers = params.n_gpu_layers,
-            .model = params.model,
-            .prompt = params.prompt,
-        },
-        .whisper = {
-            .model = params.mmproj,
-            .fname_inp = {params.file},
-        },
-    };
+    omni_params all_params;
+
+    // Initialize gpt params
+    all_params.gpt.n_gpu_layers = params.n_gpu_layers;
+    all_params.gpt.model = params.model;
+    all_params.gpt.prompt = params.prompt;
+
+    // Initialize whisper params
+    all_params.whisper.model = params.mmproj;
+    all_params.whisper.fname_inp = {params.file};
     if (all_params.gpt.n_threads <= 0)
     {
         all_params.gpt.n_threads = std::thread::hardware_concurrency();

View file

@@ -60,6 +60,11 @@
 #include <functional>
 #include <codecvt>
+
+#ifdef _WIN32
+#include <io.h>     // for _setmode
+#include <fcntl.h>  // for _O_BINARY
+#endif
 // third-party utilities
 // use your favorite implementations
 #define DR_WAV_IMPLEMENTATION