Merge pull request #6 from NexaAI/master-release-audio-lm
Remove C++20 code and support Microsoft Visual Studio compilation
commit 91b3cafbb5
5 changed files with 54 additions and 39 deletions
@@ -11,8 +11,17 @@
 #include <variant>
 #include <cmath>
 
-#include <cxxabi.h>
-#define NEXA_CLASS_NAME (abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr))
+// Replace the cxxabi.h include and NEXA_CLASS_NAME definition with cross-platform version
+#ifdef _MSC_VER
+// Windows/MSVC version
+#include <typeinfo>
+#define NEXA_CLASS_NAME (typeid(*this).name())
+#else
+// Unix/GCC/Clang version
+#include <cxxabi.h>
+#define NEXA_CLASS_NAME (abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr))
+#endif
+
 #define NEXA_LOG(fmt, ...) fprintf(stderr, "%s::%s: " fmt "\n", NEXA_CLASS_NAME, __func__, ##__VA_ARGS__)
 
 // Prints the content of a ggml_tensor with specified precision. Can use the backend if available.
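The hunk above replaces a GCC/Clang-only demangling path with a per-compiler definition: MSVC has no cxxabi.h, but its typeid(...).name() already returns a readable name, while GCC and Clang return a mangled name that abi::__cxa_demangle decodes. A minimal standalone sketch of both branches (not part of the commit; the class name here is hypothetical):

    #include <cstdio>
    #include <cstdlib>
    #include <typeinfo>
    #ifndef _MSC_VER
    #include <cxxabi.h>
    #endif

    struct OmniDemo // hypothetical stand-in for any class using NEXA_LOG
    {
        void log_self() const
        {
    #ifdef _MSC_VER
            // MSVC already yields a readable name, e.g. "struct OmniDemo"
            std::printf("%s\n", typeid(*this).name());
    #else
            // GCC/Clang yield a mangled name (e.g. "8OmniDemo");
            // __cxa_demangle decodes it. It allocates with malloc, so the
            // result is freed here; the one-shot NEXA_LOG macro simply
            // tolerates that small leak.
            char *name = abi::__cxa_demangle(typeid(*this).name(), nullptr, nullptr, nullptr);
            std::printf("%s\n", name ? name : typeid(*this).name());
            std::free(name);
    #endif
        }
    };

    int main()
    {
        OmniDemo{}.log_self();
        return 0;
    }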
@@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &params
 
 omni_context_params omni_context_default_params()
 {
-    omni_context_params params = {
-        .model = "",
-        .mmproj = "",
-        .file = "",
-        .prompt = "this conversation talks about",
-        .n_gpu_layers = -1,
-    };
+    omni_context_params params;
+    params.model = "";
+    params.mmproj = "";
+    params.file = "";
+    params.prompt = "this conversation talks about";
+    params.n_gpu_layers = -1;
     return params;
 }
 
@@ -565,17 +564,16 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
 
 static omni_params get_omni_params_from_context_params(omni_context_params &params)
 {
-    omni_params all_params = {
-        .gpt = {
-            .n_gpu_layers = params.n_gpu_layers,
-            .model = params.model,
-            .prompt = params.prompt,
-        },
-        .whisper = {
-            .model = params.mmproj,
-            .fname_inp = {params.file},
-        },
-    };
+    omni_params all_params;
+
+    // Initialize gpt params
+    all_params.gpt.n_gpu_layers = params.n_gpu_layers;
+    all_params.gpt.model = params.model;
+    all_params.gpt.prompt = params.prompt;
+
+    // Initialize whisper params
+    all_params.whisper.model = params.mmproj;
+    all_params.whisper.fname_inp = {params.file};
 
     if (all_params.gpt.n_threads <= 0)
     {
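Both parameter-builder rewrites follow the same pattern: designated initializers such as .model = ... are a C++20 feature (long available as a GNU extension), which is what tied the code to C++20 and tripped up MSVC. A compilable sketch of the before/after using simplified stand-in structs (the real omni_params definition lives in the NexaAI sources):

    #include <string>
    #include <vector>

    // Simplified stand-ins, not the real definitions.
    struct gpt_stub     { int n_gpu_layers = 0; std::string model, prompt; };
    struct whisper_stub { std::string model; std::vector<std::string> fname_inp; };
    struct omni_stub    { gpt_stub gpt; whisper_stub whisper; };

    // Pre-C++20 MSVC modes reject the designated-initializer form:
    //     omni_stub all = { .gpt = { .n_gpu_layers = -1 } };
    // Plain member-wise assignment is valid C++11 everywhere:
    static omni_stub make_params(int n_gpu_layers, const std::string &file)
    {
        omni_stub all;
        all.gpt.n_gpu_layers = n_gpu_layers;
        all.whisper.fname_inp = {file};
        return all;
    }

    int main()
    {
        return make_params(-1, "demo.wav").gpt.n_gpu_layers == -1 ? 0 : 1;
    }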
@@ -60,6 +60,11 @@
 #include <functional>
 #include <codecvt>
 
+#ifdef _WIN32
+#include <io.h>
+#include <fcntl.h>
+#endif
+
 // third-party utilities
 // use your favorite implementations
 #define DR_WAV_IMPLEMENTATION
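The io.h/fcntl.h pair added here is the usual prerequisite for _setmode on Windows; the second copy of this hunk further down annotates the includes with "for _setmode" and "for _O_BINARY". A hedged sketch of the typical call (targeting stdin is an assumption, the diff does not show the call site):

    #include <cstdio>
    #ifdef _WIN32
    #include <io.h>    // _setmode
    #include <fcntl.h> // _O_BINARY
    #endif

    int main()
    {
    #ifdef _WIN32
        // Switch stdin to binary mode so raw WAV bytes are not altered by
        // CRLF text-mode translation; returns the previous mode or -1.
        _setmode(_fileno(stdin), _O_BINARY);
    #endif
        // ... read audio bytes with fread(...) ...
        return 0;
    }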
@@ -523,13 +523,12 @@ bool omni_context_params_parse(int argc, char **argv, omni_context_params &params
 
 omni_context_params omni_context_default_params()
 {
-    omni_context_params params = {
-        .model = "",
-        .mmproj = "",
-        .file = "",
-        .prompt = "this conversation talks about",
-        .n_gpu_layers = -1,
-    };
+    omni_context_params params;
+    params.model = "";
+    params.mmproj = "";
+    params.file = "";
+    params.prompt = "this conversation talks about";
+    params.n_gpu_layers = -1;
     return params;
 }
 
@@ -565,18 +564,17 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
 
 static omni_params get_omni_params_from_context_params(omni_context_params &params)
 {
-    omni_params all_params = {
-        .gpt = {
-            .n_gpu_layers = params.n_gpu_layers,
-            .model = params.model,
-            .prompt = params.prompt,
-        },
-        .whisper = {
-            .model = params.mmproj,
-            .fname_inp = {params.file},
-        },
-    };
+    omni_params all_params;
+
+    // Initialize gpt params
+    all_params.gpt.n_gpu_layers = params.n_gpu_layers;
+    all_params.gpt.model = params.model;
+    all_params.gpt.prompt = params.prompt;
+
+    // Initialize whisper params
+    all_params.whisper.model = params.mmproj;
+    all_params.whisper.fname_inp = {params.file};
 
     if (all_params.gpt.n_threads <= 0)
     {
         all_params.gpt.n_threads = std::thread::hardware_concurrency();
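A note on the surviving context line: std::thread::hardware_concurrency() is only a hint and may return 0 when the core count cannot be determined, so code that assigns it directly to n_threads can still end up non-positive. A defensive variant (an illustration of the caveat, not code from this commit):

    #include <thread>

    // Fall back to a single thread when the hardware hint is unavailable.
    static int pick_thread_count(int requested)
    {
        if (requested > 0)
            return requested;
        unsigned hw = std::thread::hardware_concurrency();
        return hw > 0 ? static_cast<int>(hw) : 1;
    }

    int main()
    {
        return pick_thread_count(0) > 0 ? 0 : 1;
    }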
@@ -60,6 +60,11 @@
 #include <functional>
 #include <codecvt>
 
+#ifdef _WIN32
+#include <io.h> // for _setmode
+#include <fcntl.h> // for _O_BINARY
+#endif
+
 // third-party utilities
 // use your favorite implementations
 #define DR_WAV_IMPLEMENTATION