Merge pull request #7 from NexaAI/master

Remove C++20 code and support Microsoft Visual Studio compilation
Zack Li 2024-11-04 22:00:30 -08:00 committed by GitHub
commit d6c0627d31
5 changed files with 54 additions and 39 deletions

View file

@@ -11,8 +11,17 @@
 #include <variant>
 #include <cmath>
-#include <cxxabi.h>
-#define NEXA_CLASS_NAME (abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr))
+// Replace the cxxabi.h include and NEXA_CLASS_NAME definition with cross-platform version
+#ifdef _MSC_VER
+// Windows/MSVC version
+#include <typeinfo>
+#define NEXA_CLASS_NAME (typeid(*this).name())
+#else
+// Unix/GCC/Clang version
+#include <cxxabi.h>
+#define NEXA_CLASS_NAME (abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr))
+#endif
 #define NEXA_LOG(fmt, ...) fprintf(stderr, "%s::%s: " fmt "\n", NEXA_CLASS_NAME, __func__, ##__VA_ARGS__)
 // Prints the content of a ggml_tensor with specified precision. Can use the backend if available.
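For readers porting a similar logging macro, here is a minimal, self-contained sketch of the same `#ifdef _MSC_VER` dispatch; the helper name `demangled_name` is hypothetical, not part of the PR. One caveat worth hedging: on the GCC/Clang branch, `abi::__cxa_demangle` returns a heap-allocated buffer, so a wrapper that copies and frees the result avoids the per-call leak the bare macro incurs.

```cpp
// Hypothetical helper (not from the PR): same #ifdef _MSC_VER dispatch as
// above, but returns a std::string and frees the __cxa_demangle buffer.
#include <cstdio>
#include <cstdlib>
#include <string>
#include <typeinfo>
#ifndef _MSC_VER
#include <cxxabi.h>
#endif

static std::string demangled_name(const std::type_info &ti) {
#ifdef _MSC_VER
    // MSVC's typeid().name() is already human-readable, e.g. "struct Foo".
    return ti.name();
#else
    // GCC/Clang return a mangled name; __cxa_demangle heap-allocates the
    // demangled form, so copy it out and free the buffer.
    int status = 0;
    char *raw = abi::__cxa_demangle(ti.name(), nullptr, nullptr, &status);
    std::string out = (status == 0 && raw != nullptr) ? raw : ti.name();
    std::free(raw);  // free(nullptr) is a no-op
    return out;
#endif
}

struct Foo {};

int main() {
    Foo f;
    std::printf("%s\n", demangled_name(typeid(f)).c_str());  // "Foo" on GCC/Clang
}
```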

View file

@@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &param
 omni_context_params omni_context_default_params()
 {
-    omni_context_params params = {
-        .model = "",
-        .mmproj = "",
-        .file = "",
-        .prompt = "this conversation talks about",
-        .n_gpu_layers = -1,
-    };
+    omni_context_params params;
+    params.model = "";
+    params.mmproj = "";
+    params.file = "";
+    params.prompt = "this conversation talks about";
+    params.n_gpu_layers = -1;
     return params;
 }
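The hunk above swaps a C++20 designated-initializer list for default construction plus per-member assignments; designated initializers are a C++20 feature that MSVC only accepts under /std:c++20, so this form compiles under earlier standard modes too. A minimal sketch of the same transformation, using a hypothetical `params_t` stand-in rather than the real `omni_context_params`:

```cpp
#include <string>

// Hypothetical stand-in for omni_context_params, for illustration only.
struct params_t {
    std::string model;
    std::string mmproj;
    std::string prompt;
    int n_gpu_layers = 0;
};

params_t make_default_params() {
    // C++20 form the PR removes (rejected below /std:c++20):
    //   params_t p{.model = "", .mmproj = "", .prompt = "...", .n_gpu_layers = -1};
    // Portable pre-C++20 form: default-construct, then assign each member.
    params_t p;
    p.model = "";
    p.mmproj = "";
    p.prompt = "this conversation talks about";
    p.n_gpu_layers = -1;
    return p;
}
```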
@@ -565,17 +564,16 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
 static omni_params get_omni_params_from_context_params(omni_context_params &params)
 {
-    omni_params all_params = {
-        .gpt = {
-            .n_gpu_layers = params.n_gpu_layers,
-            .model = params.model,
-            .prompt = params.prompt,
-        },
-        .whisper = {
-            .model = params.mmproj,
-            .fname_inp = {params.file},
-        },
-    };
+    omni_params all_params;
+    // Initialize gpt params
+    all_params.gpt.n_gpu_layers = params.n_gpu_layers;
+    all_params.gpt.model = params.model;
+    all_params.gpt.prompt = params.prompt;
+    // Initialize whisper params
+    all_params.whisper.model = params.mmproj;
+    all_params.whisper.fname_inp = {params.file};
 
     if (all_params.gpt.n_threads <= 0)
     {
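Same transformation for the nested aggregate. One hedged caveat, not addressed by the PR: `omni_params all_params;` default-initializes, so any member without a default member initializer is left indeterminate, whereas the old braced form value-initialized every member not named. A compact sketch with hypothetical stand-in types:

```cpp
// Hypothetical stand-in types, for illustration only.
struct gpt_cfg { int n_gpu_layers; };   // note: no default member initializer
struct all_cfg { gpt_cfg gpt; };

all_cfg make_cfg(int n_layers) {
    all_cfg c{};    // value-initialize: zeroes gpt.n_gpu_layers, matching
                    // the guarantee the old designated-initializer form gave
    // all_cfg c;   // would leave gpt.n_gpu_layers indeterminate here
    c.gpt.n_gpu_layers = n_layers;
    return c;
}
```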

View file

@@ -60,6 +60,11 @@
 #include <functional>
 #include <codecvt>
+
+#ifdef _WIN32
+#include <io.h>
+#include <fcntl.h>
+#endif
 
 // third-party utilities
 // use your favorite implementations
 #define DR_WAV_IMPLEMENTATION
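The matching hunk in the last file below includes the same headers with comments naming `_setmode` and `_O_BINARY`, which suggests the intent: switching Windows stdio streams to binary mode so raw WAV bytes are not mangled by text-mode translation. A minimal sketch of that typical pattern (an assumption about intent, not code from the PR):

```cpp
#include <cstdio>
#ifdef _WIN32
#include <io.h>     // _setmode, _fileno
#include <fcntl.h>  // _O_BINARY
#endif

int main() {
#ifdef _WIN32
    // In text mode, Windows translates "\n" to "\r\n" and treats 0x1A as EOF,
    // which corrupts raw audio bytes; switch the stdio streams to binary.
    _setmode(_fileno(stdin), _O_BINARY);
    _setmode(_fileno(stdout), _O_BINARY);
#endif
    // ... read/write raw WAV bytes portably from here on ...
    return 0;
}
```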

View file

@@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &param
 omni_context_params omni_context_default_params()
 {
-    omni_context_params params = {
-        .model = "",
-        .mmproj = "",
-        .file = "",
-        .prompt = "this conversation talks about",
-        .n_gpu_layers = -1,
-    };
+    omni_context_params params;
+    params.model = "";
+    params.mmproj = "";
+    params.file = "";
+    params.prompt = "this conversation talks about";
+    params.n_gpu_layers = -1;
     return params;
 }
@@ -565,18 +564,17 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
 static omni_params get_omni_params_from_context_params(omni_context_params &params)
 {
-    omni_params all_params = {
-        .gpt = {
-            .n_gpu_layers = params.n_gpu_layers,
-            .model = params.model,
-            .prompt = params.prompt,
-        },
-        .whisper = {
-            .model = params.mmproj,
-            .fname_inp = {params.file},
-        },
-    };
+    omni_params all_params;
+    // Initialize gpt params
+    all_params.gpt.n_gpu_layers = params.n_gpu_layers;
+    all_params.gpt.model = params.model;
+    all_params.gpt.prompt = params.prompt;
+    // Initialize whisper params
+    all_params.whisper.model = params.mmproj;
+    all_params.whisper.fname_inp = {params.file};
 
     if (all_params.gpt.n_threads <= 0)
     {
         all_params.gpt.n_threads = std::thread::hardware_concurrency();

View file

@@ -60,6 +60,11 @@
 #include <functional>
 #include <codecvt>
+
+#ifdef _WIN32
+#include <io.h>    // for _setmode
+#include <fcntl.h> // for _O_BINARY
+#endif
 
 // third-party utilities
 // use your favorite implementations
 #define DR_WAV_IMPLEMENTATION