From d42e0371f84b413c25511328d75f079962c6fbbb Mon Sep 17 00:00:00 2001
From: Zack Zhiyuan Li
Date: Mon, 4 Nov 2024 22:50:33 +0000
Subject: [PATCH] remove C++20 style

---
 examples/nexa-omni-audio/omni.cpp | 21 ++++++++++-----------
 examples/qwen2-audio/qwen2.cpp    | 23 +++++++++++------------
 2 files changed, 21 insertions(+), 23 deletions(-)

diff --git a/examples/nexa-omni-audio/omni.cpp b/examples/nexa-omni-audio/omni.cpp
index d2701c2c1..0982a79df 100644
--- a/examples/nexa-omni-audio/omni.cpp
+++ b/examples/nexa-omni-audio/omni.cpp
@@ -565,17 +565,16 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
 
 static omni_params get_omni_params_from_context_params(omni_context_params &params)
 {
-    omni_params all_params = {
-        .gpt = {
-            .n_gpu_layers = params.n_gpu_layers,
-            .model = params.model,
-            .prompt = params.prompt,
-        },
-        .whisper = {
-            .model = params.mmproj,
-            .fname_inp = {params.file},
-        },
-    };
+    omni_params all_params;
+
+    // Initialize gpt params
+    all_params.gpt.n_gpu_layers = params.n_gpu_layers;
+    all_params.gpt.model = params.model;
+    all_params.gpt.prompt = params.prompt;
+
+    // Initialize whisper params
+    all_params.whisper.model = params.mmproj;
+    all_params.whisper.fname_inp = {params.file};
 
     if (all_params.gpt.n_threads <= 0)
     {
diff --git a/examples/qwen2-audio/qwen2.cpp b/examples/qwen2-audio/qwen2.cpp
index c1636139b..d14145835 100644
--- a/examples/qwen2-audio/qwen2.cpp
+++ b/examples/qwen2-audio/qwen2.cpp
@@ -565,18 +565,17 @@ bool omni_params_parse(int argc, char **argv, omni_params &params)
 
 static omni_params get_omni_params_from_context_params(omni_context_params &params)
 {
-    omni_params all_params = {
-        .gpt = {
-            .n_gpu_layers = params.n_gpu_layers,
-            .model = params.model,
-            .prompt = params.prompt,
-        },
-        .whisper = {
-            .model = params.mmproj,
-            .fname_inp = {params.file},
-        },
-    };
-
+    omni_params all_params;
+
+    // Initialize gpt params
+    all_params.gpt.n_gpu_layers = params.n_gpu_layers;
+    all_params.gpt.model = params.model;
+    all_params.gpt.prompt = params.prompt;
+
+    // Initialize whisper params
+    all_params.whisper.model = params.mmproj;
+    all_params.whisper.fname_inp = {params.file};
+
     if (all_params.gpt.n_threads <= 0)
     {
         all_params.gpt.n_threads = std::thread::hardware_concurrency();