Merge pull request #6 from NexaAI/master-release-audio-lm
Remove C++20 coding and support Microsoft Visual Studio Compilation
This commit is contained in:
commit
91b3cafbb5
5 changed files with 54 additions and 39 deletions
|
@ -11,8 +11,17 @@
|
|||
#include <variant>
|
||||
#include <cmath>
|
||||
|
||||
#include <cxxabi.h>
|
||||
#define NEXA_CLASS_NAME (abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr))
|
||||
// Replace the cxxabi.h include and NEXA_CLASS_NAME definition with cross-platform version
|
||||
#ifdef _MSC_VER
|
||||
// Windows/MSVC version
|
||||
#include <typeinfo>
|
||||
#define NEXA_CLASS_NAME (typeid(*this).name())
|
||||
#else
|
||||
// Unix/GCC/Clang version
|
||||
#include <cxxabi.h>
|
||||
#define NEXA_CLASS_NAME (abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr))
|
||||
#endif
|
||||
|
||||
#define NEXA_LOG(fmt, ...) fprintf(stderr, "%s::%s: " fmt "\n", NEXA_CLASS_NAME, __func__, ##__VA_ARGS__)
|
||||
|
||||
// Prints the content of a ggml_tensor with specified precision. Can use the backend if available.
|
||||
|
|
|
@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &param
|
|||
|
||||
omni_context_params omni_context_default_params()
|
||||
{
|
||||
omni_context_params params = {
|
||||
.model = "",
|
||||
.mmproj = "",
|
||||
.file = "",
|
||||
.prompt = "this conversation talks about",
|
||||
.n_gpu_layers = -1,
|
||||
};
|
||||
omni_context_params params;
|
||||
params.model = "";
|
||||
params.mmproj = "";
|
||||
params.file = "";
|
||||
params.prompt = "this conversation talks about";
|
||||
params.n_gpu_layers = -1;
|
||||
return params;
|
||||
}
|
||||
|
||||
|
@ -565,17 +564,16 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
|
|||
|
||||
static omni_params get_omni_params_from_context_params(omni_context_params ¶ms)
|
||||
{
|
||||
omni_params all_params = {
|
||||
.gpt = {
|
||||
.n_gpu_layers = params.n_gpu_layers,
|
||||
.model = params.model,
|
||||
.prompt = params.prompt,
|
||||
},
|
||||
.whisper = {
|
||||
.model = params.mmproj,
|
||||
.fname_inp = {params.file},
|
||||
},
|
||||
};
|
||||
omni_params all_params;
|
||||
|
||||
// Initialize gpt params
|
||||
all_params.gpt.n_gpu_layers = params.n_gpu_layers;
|
||||
all_params.gpt.model = params.model;
|
||||
all_params.gpt.prompt = params.prompt;
|
||||
|
||||
// Initialize whisper params
|
||||
all_params.whisper.model = params.mmproj;
|
||||
all_params.whisper.fname_inp = {params.file};
|
||||
|
||||
if (all_params.gpt.n_threads <= 0)
|
||||
{
|
||||
|
|
|
@ -60,6 +60,11 @@
|
|||
#include <functional>
|
||||
#include <codecvt>
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <io.h>
|
||||
#include <fcntl.h>
|
||||
#endif
|
||||
|
||||
// third-party utilities
|
||||
// use your favorite implementations
|
||||
#define DR_WAV_IMPLEMENTATION
|
||||
|
|
|
@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &param
|
|||
|
||||
omni_context_params omni_context_default_params()
|
||||
{
|
||||
omni_context_params params = {
|
||||
.model = "",
|
||||
.mmproj = "",
|
||||
.file = "",
|
||||
.prompt = "this conversation talks about",
|
||||
.n_gpu_layers = -1,
|
||||
};
|
||||
omni_context_params params;
|
||||
params.model = "";
|
||||
params.mmproj = "";
|
||||
params.file = "";
|
||||
params.prompt = "this conversation talks about";
|
||||
params.n_gpu_layers = -1;
|
||||
return params;
|
||||
}
|
||||
|
||||
|
@ -565,18 +564,17 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
|
|||
|
||||
static omni_params get_omni_params_from_context_params(omni_context_params ¶ms)
|
||||
{
|
||||
omni_params all_params = {
|
||||
.gpt = {
|
||||
.n_gpu_layers = params.n_gpu_layers,
|
||||
.model = params.model,
|
||||
.prompt = params.prompt,
|
||||
},
|
||||
.whisper = {
|
||||
.model = params.mmproj,
|
||||
.fname_inp = {params.file},
|
||||
},
|
||||
};
|
||||
|
||||
omni_params all_params;
|
||||
|
||||
// Initialize gpt params
|
||||
all_params.gpt.n_gpu_layers = params.n_gpu_layers;
|
||||
all_params.gpt.model = params.model;
|
||||
all_params.gpt.prompt = params.prompt;
|
||||
|
||||
// Initialize whisper params
|
||||
all_params.whisper.model = params.mmproj;
|
||||
all_params.whisper.fname_inp = {params.file};
|
||||
|
||||
if (all_params.gpt.n_threads <= 0)
|
||||
{
|
||||
all_params.gpt.n_threads = std::thread::hardware_concurrency();
|
||||
|
|
|
@ -60,6 +60,11 @@
|
|||
#include <functional>
|
||||
#include <codecvt>
|
||||
|
||||
#ifdef _WIN32
|
||||
#include <io.h> // for _setmode
|
||||
#include <fcntl.h> // for _O_BINARY
|
||||
#endif
|
||||
|
||||
// third-party utilities
|
||||
// use your favorite implementations
|
||||
#define DR_WAV_IMPLEMENTATION
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue