From 191da330fc2c597d7aed1152d389e7e9849d3252 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov <ggerganov@gmail.com>
Date: Wed, 11 Dec 2024 16:50:40 +0200
Subject: [PATCH] clean-up

---
 examples/tts/tts.cpp | 354 +++++++++++++++++++++++--------------------
 ggml/src/ggml.c      |   4 -
 src/llama.cpp        |   5 -
 3 files changed, 191 insertions(+), 172 deletions(-)

diff --git a/examples/tts/tts.cpp b/examples/tts/tts.cpp
index 8eaf5a262..a3b923332 100644
--- a/examples/tts/tts.cpp
+++ b/examples/tts/tts.cpp
@@ -107,26 +107,29 @@ static void irfft(int n, const double * inp_cplx, double * out_real) {
     }
 }
 
-static void fold(
-    const std::vector<float> & data,
-    int64_t output_size,
-    int64_t win_length,
-    int64_t hop_length,
-    int64_t pad,
-    std::vector<float>& output
-) {
-    int64_t output_height = output_size;
-    int64_t kernel_w = win_length;
-    int64_t stride_w = hop_length;
-
-    int64_t width = output_size;
+//
+// y = torch.nn.functional.fold(
+//      data, output_size=(1, output_size), kernel_size=(1, self.win_length), stride=(1, self.hop_length),
+// )[:, 0, 0, pad:-pad]
+//
+// data.shape = torch.Size([1, 1280, 261])
+// output_size = 84480
+// win_length = 1280
+// hop_length = 320
+// pad = 480
+//
+static void fold(const std::vector<float> & data, int64_t n_out, int64_t n_win, int64_t n_hop, int64_t n_pad, std::vector<float> & output) {
+    int64_t output_height = n_out;
+    int64_t kernel_w = n_win;
+    int64_t stride_w = n_hop;
+    int64_t width = n_out;
 
     output.resize(width, 0.0f);
 
     int64_t col_idx = 0;
     for (int64_t w_col = 0; w_col < width; ++w_col) {
-        int64_t start = w_col * stride_w - pad;
-        int64_t end = start + kernel_w;
+        int64_t start = w_col * stride_w - n_pad;
+        int64_t end = start + kernel_w;
 
         for (int64_t w_im = start; w_im < end; ++w_im) {
             if (w_im >= 0 && w_im < output_height) {
@@ -136,124 +139,55 @@
         }
     }
 
-    output.resize(output_size - 2 * pad);
+    output.resize(n_out - 2 * n_pad);
 }
 
-int main(int argc, char ** argv) {
-    common_params params;
+struct wav_header {
+    char riff[4] = {'R', 'I', 'F', 'F'};
+    uint32_t chunk_size;
+    char wave[4] = {'W', 'A', 'V', 'E'};
+    char fmt[4] = {'f', 'm', 't', ' '};
+    uint32_t fmt_chunk_size = 16;
+    uint16_t audio_format = 1; // PCM
+    uint16_t num_channels = 1; // Mono
+    uint32_t sample_rate;
+    uint32_t byte_rate;
+    uint16_t block_align;
+    uint16_t bits_per_sample = 16;
+    char data[4] = {'d', 'a', 't', 'a'};
+    uint32_t data_size;
+};
 
-    params.prompt = "";
-
-    params.n_predict = 1024;
-    params.n_batch = 8192;
-    params.n_ctx = 8192;
-
-    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_TTS, print_usage)) {
-        return 1;
+static void save_wav16(const std::string & fname, const std::vector<float> & data, int sample_rate) {
+    std::ofstream file(fname, std::ios::binary);
+    if (!file) {
+        LOG_ERR("%s: Failed to open file '%s' for writing", __func__, fname.c_str());
+        return;
     }
 
-    common_init();
+    wav_header header;
+    header.sample_rate = sample_rate;
+    header.byte_rate = header.sample_rate * header.num_channels * (header.bits_per_sample / 8);
+    header.block_align = header.num_channels * (header.bits_per_sample / 8);
+    header.data_size = data.size() * (header.bits_per_sample / 8);
+    header.chunk_size = 36 + header.data_size;
 
-    // init LLM
+    file.write(reinterpret_cast<const char*>(&header), sizeof(header));
 
-    llama_backend_init();
-    llama_numa_init(params.numa);
-
-    llama_model * model_ttc = NULL; // text-to-codes
-    llama_model * model_cts = NULL; // codes-to-speech
-
-    llama_context * ctx_ttc = NULL;
-    llama_context * ctx_cts = NULL;
-
-    common_init_result llama_init_ttc = common_init_from_params(params);
-    model_ttc = llama_init_ttc.model;
-    ctx_ttc = llama_init_ttc.context;
-
-    params.model = params.vocoder.model;
-    params.embedding = true;
-
-    common_init_result llama_init_cts = common_init_from_params(params);
-    model_cts = llama_init_cts.model;
-    ctx_cts = llama_init_cts.context;
-
-    const auto t_main_start = ggml_time_us();
-
-    std::vector<llama_token> prompt_inp = {198, 88225, 155856, 151669, 152205,
-        153064, 152537, 153421, 153209, 152524, 151689, 152993, 152438, 152695,
-        153091, 152945, 152829, 152534, 152934, 153020, 151997, 152263, 153010,
-        153146, 152399, 153208, 152496, 151793, 152848, 152263, 152571, 153286,
-        152227, 153300, 152934, 152263, 153208, 152263, 152965, 152430, 152296,
-        153146, 152920, 152376, 152556, 153363, 151775, 152044, 152972, 152690,
-        153379, 152368, 152233, 153422, 152490, 151996, 152022, 151694, 152061,
-        153238, 152539, 153356, 152640, 153021, 153123, 151962, 153094, 151670,
-        198, 20339, 13189, 155824, 151669, 152070, 152007, 152910, 151683,
-        152000, 152373, 152760, 152046, 151735, 152334, 152394, 153073, 152908,
-        151856, 151953, 153247, 153293, 151903, 153480, 153168, 152478, 153359,
-        153429, 151905, 151678, 152567, 152411, 152165, 152556, 153075, 153424,
-        151993, 152999, 153078, 152151, 152088, 153389, 152484, 151874, 151670,
-        198, 285, 155784, 151669, 152226, 152126, 152638, 153215, 151729,
-        152959, 153479, 153059, 151838, 151670, 198, 1782, 155783, 151669,
-        153288, 153055, 153314, 152497, 152962, 152741, 152076, 153253, 151670,
-        198, 471, 16488, 155825, 151669, 152060, 152916, 151893, 153469, 152501,
-        152080, 152743, 151932, 153161, 152096, 152761, 152698, 153401, 153242,
-        153336, 152441, 152838, 153467, 152706, 153496, 153310, 152422, 153360,
-        153115, 152763, 151998, 152373, 153450, 152554, 151968, 153323, 152055,
-        152468, 153111, 153358, 152813, 152010, 151770, 152823, 152960, 151670,
-        198, 22627, 155823, 151669, 152814, 152366, 153484, 152931, 153441,
-        152164, 152877, 152915, 153463, 151692, 152911, 152747, 152776, 151831,
-        153449, 151882, 152975, 152031, 152513, 153150, 152448, 152667, 153133,
-        153189, 152619, 153466, 152054, 152106, 153119, 152277, 152439, 153109,
-        152997, 152141, 153154, 153256, 153311, 151922, 151670, 198, 1055,
-        155781, 151669, 152633, 151850, 153060, 153270, 152560, 153348, 152729,
-        151670, 198, 25312, 155803, 151669, 152521, 153403, 152561, 153337,
-        153383, 152199, 153493, 153326, 151830, 152254, 152248, 152349, 152153,
-        153007, 151823, 153037, 152575, 152457, 152406, 152592, 153116, 153365,
-        153456, 151670, 198, 88225, 155817, 151669, 153271, 151925, 152218,
-        152418, 152253, 153140, 151903, 153151, 152626, 152338, 152647, 153464,
-        152785, 152768, 151711, 152037, 152033, 151804, 152216, 151701, 151855,
-        152348, 152995, 152955, 152905, 152342, 152340, 153391, 153453, 152418,
-        153415, 151990, 153083, 152884, 151670, 198, 151668, 198, 151645};
-
-    {
-        const std::string inp_txt = common_detokenize(ctx_ttc, prompt_inp, true);
-        LOG_INF("prompt: '%s'\n", inp_txt.c_str());
-        LOG_INF("%s: prompt size: %d\n", __func__, (int) prompt_inp.size());
+    for (const auto & sample : data) {
+        int16_t pcm_sample = static_cast<int16_t>(std::clamp(sample * 32767.0, -32768.0, 32767.0));
+        file.write(reinterpret_cast<const char*>(&pcm_sample), sizeof(pcm_sample));
     }
 
-    // remove all non-audio tokens (i.e. < 151672 || > 155772)
-    prompt_inp.erase(std::remove_if(prompt_inp.begin(), prompt_inp.end(), [](llama_token t) { return t < 151672 || t > 155772; }), prompt_inp.end());
+    file.close();
+}
 
-    {
-        const std::string inp_txt = common_detokenize(ctx_ttc, prompt_inp, true);
-        LOG_INF("prompt audio: '%s'\n", inp_txt.c_str());
-        LOG_INF("%s: prompt audio size: %d\n", __func__, (int) prompt_inp.size());
-    }
-
-    for (auto & token : prompt_inp) {
-        token -= 151672;
-    }
-
-    llama_batch batch = llama_batch_init(prompt_inp.size(), 0, 1);
-
-    // evaluate the initial prompt
-    for (size_t i = 0; i < prompt_inp.size(); ++i) {
-        common_batch_add(batch, prompt_inp[i], i, { 0 }, true); // TODO: all logits?
-    }
-    GGML_ASSERT(batch.n_tokens == (int) prompt_inp.size());
-
-    if (llama_decode(ctx_cts, batch) != 0) {
-        LOG_ERR("%s: llama_decode() failed\n", __func__);
-        return 1;
-    }
-
-    llama_synchronize(ctx_cts);
-
-    LOG_INF("%s: time for prompt: %.3f ms\n", __func__, (ggml_time_us() - t_main_start) / 1000.0f);
-
-    const int n_embd = llama_n_embd(model_cts);
-    const float * embd = llama_get_embeddings(ctx_cts);
-
-    const int n = prompt_inp.size();
+static std::vector<float> embd_to_audio(
+        const float * embd,
+        const std::vector<llama_token> & codes,
+        const int n_embd,
+        const int n_thread) {
+    const int n = codes.size();
     const int n_fft = 1280;
     const int n_hop = 320;
     const int n_win = 1280;
@@ -301,7 +235,6 @@ int main(int argc, char ** argv) {
     std::vector<float> res (n*n_fft);
     std::vector<float> hann2(n*n_fft);
 
-    const int n_thread = std::thread::hardware_concurrency();
     std::vector<std::thread> workers(n_thread);
     for (int i = 0; i < n_thread; ++i) {
         workers[i] = std::thread([&, i]() {
@@ -318,56 +251,151 @@
         workers[i].join();
     }
 
-    //LOG("result (%d):\n", res.size());
-    //for (int i = 0; i < n_fft; ++i) {
-    //    LOG("%d - %8.5f\n", i, res[5*n_fft + i]);
-    //}
-    //LOG("\n");
-    //double sum = 0.0;
-    //for (int i = 0; i < n_fft; ++i) {
-    //    sum += res[5*n_fft + i];
-    //}
-    //LOG("sum: %f\n", sum);
-
     std::vector<float> audio;
     std::vector<float> env;
 
     fold(res, n_out, n_win, n_hop, n_pad, audio);
-    fold(hann2, n_out, n_win, n_hop, n_pad, env);
+    fold(hann2, n_out, n_win, n_hop, n_pad, env); // TODO: can be done once
 
     for (size_t i = 0; i < audio.size(); ++i) {
        audio[i] /= env[i];
    }
 
-    //LOG("audio (%d):\n", audio.size());
-    //for (int i = 0; i < 1000; ++i) {
-    //    LOG("%d: %8.5f\n", i, audio[i]);
-    //}
-    //LOG("\n");
-    //double sum = 0.0;
-    //for (int i = 0; i < 1000; ++i) {
-    //    sum += audio[i];
-    //}
-    //LOG("sum: %f\n", sum);
+    return audio;
+}
 
-    //{
-    //    LOG("result:\n");
-    //    for (int i = 0; i < 10; ++i) {
-    //        LOG("%8.3f ", S[i]);
-    //    }
-    //    LOG("\n");
-    //    for (int i = n_spec - 10; i < n_spec; ++i) {
-    //        LOG("%8.3f ", S[i]);
-    //    }
-    //    LOG("\n");
-    //    double sum = 0.0;
-    //    for (int i = 0; i < n_spec; ++i) {
-    //        sum += S[i];
-    //    }
-    //    LOG("sum: %f\n", sum);
-    //}
+int main(int argc, char ** argv) {
+    common_params params;
 
-    fprintf(stderr, "\n");
+    params.prompt = "";
+
+    params.n_predict = 1024;
+    params.n_batch = 8192;
+    params.n_ctx = 8192;
+
+    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_TTS, print_usage)) {
+        return 1;
+    }
+
+    common_init();
+
+    // init LLM
+
+    llama_backend_init();
+    llama_numa_init(params.numa);
+
+    llama_model * model_ttc = NULL; // text-to-codes
+    llama_model * model_cts = NULL; // codes-to-speech
+
+    llama_context * ctx_ttc = NULL;
+    llama_context * ctx_cts = NULL;
+
+    common_init_result llama_init_ttc = common_init_from_params(params);
+    model_ttc = llama_init_ttc.model;
+    ctx_ttc = llama_init_ttc.context;
+
+    params.model = params.vocoder.model;
+    params.embedding = true;
+
+    common_init_result llama_init_cts = common_init_from_params(params);
+    model_cts = llama_init_cts.model;
+    ctx_cts = llama_init_cts.context;
+
+    const auto t_main_start = ggml_time_us();
+
+    std::vector<llama_token> codes = {198, 88225, 155856, 151669, 152205,
+        153064, 152537, 153421, 153209, 152524, 151689, 152993, 152438, 152695,
+        153091, 152945, 152829, 152534, 152934, 153020, 151997, 152263, 153010,
+        153146, 152399, 153208, 152496, 151793, 152848, 152263, 152571, 153286,
+        152227, 153300, 152934, 152263, 153208, 152263, 152965, 152430, 152296,
+        153146, 152920, 152376, 152556, 153363, 151775, 152044, 152972, 152690,
+        153379, 152368, 152233, 153422, 152490, 151996, 152022, 151694, 152061,
+        153238, 152539, 153356, 152640, 153021, 153123, 151962, 153094, 151670,
+        198, 20339, 13189, 155824, 151669, 152070, 152007, 152910, 151683,
+        152000, 152373, 152760, 152046, 151735, 152334, 152394, 153073, 152908,
+        151856, 151953, 153247, 153293, 151903, 153480, 153168, 152478, 153359,
+        153429, 151905, 151678, 152567, 152411, 152165, 152556, 153075, 153424,
+        151993, 152999, 153078, 152151, 152088, 153389, 152484, 151874, 151670,
+        198, 285, 155784, 151669, 152226, 152126, 152638, 153215, 151729,
+        152959, 153479, 153059, 151838, 151670, 198, 1782, 155783, 151669,
+        153288, 153055, 153314, 152497, 152962, 152741, 152076, 153253, 151670,
+        198, 471, 16488, 155825, 151669, 152060, 152916, 151893, 153469, 152501,
+        152080, 152743, 151932, 153161, 152096, 152761, 152698, 153401, 153242,
+        153336, 152441, 152838, 153467, 152706, 153496, 153310, 152422, 153360,
+        153115, 152763, 151998, 152373, 153450, 152554, 151968, 153323, 152055,
+        152468, 153111, 153358, 152813, 152010, 151770, 152823, 152960, 151670,
+        198, 22627, 155823, 151669, 152814, 152366, 153484, 152931, 153441,
+        152164, 152877, 152915, 153463, 151692, 152911, 152747, 152776, 151831,
+        153449, 151882, 152975, 152031, 152513, 153150, 152448, 152667, 153133,
+        153189, 152619, 153466, 152054, 152106, 153119, 152277, 152439, 153109,
+        152997, 152141, 153154, 153256, 153311, 151922, 151670, 198, 1055,
+        155781, 151669, 152633, 151850, 153060, 153270, 152560, 153348, 152729,
+        151670, 198, 25312, 155803, 151669, 152521, 153403, 152561, 153337,
+        153383, 152199, 153493, 153326, 151830, 152254, 152248, 152349, 152153,
+        153007, 151823, 153037, 152575, 152457, 152406, 152592, 153116, 153365,
+        153456, 151670, 198, 88225, 155817, 151669, 153271, 151925, 152218,
+        152418, 152253, 153140, 151903, 153151, 152626, 152338, 152647, 153464,
+        152785, 152768, 151711, 152037, 152033, 151804, 152216, 151701, 151855,
+        152348, 152995, 152955, 152905, 152342, 152340, 153391, 153453, 152418,
+        153415, 151990, 153083, 152884, 151670, 198, 151668, 198, 151645};
+
+    {
+        const std::string inp_txt = common_detokenize(ctx_ttc, codes, true);
+        LOG_INF("prompt: '%s'\n", inp_txt.c_str());
+        LOG_INF("%s: prompt size: %d\n", __func__, (int) codes.size());
+    }
+
+    // remove all non-audio tokens (i.e. < 151672 || > 155772)
+    codes.erase(std::remove_if(codes.begin(), codes.end(), [](llama_token t) { return t < 151672 || t > 155772; }), codes.end());
+
+    {
+        const std::string inp_txt = common_detokenize(ctx_ttc, codes, true);
+        LOG_INF("prompt audio: '%s'\n", inp_txt.c_str());
+        LOG_INF("%s: prompt audio size: %d\n", __func__, (int) codes.size());
+    }
+
+    for (auto & token : codes) {
+        token -= 151672;
+    }
+
+    const auto t_voc_start = ggml_time_us();
+
+    llama_batch batch = llama_batch_init(codes.size(), 0, 1);
+
+    // evaluate the initial prompt
+    for (size_t i = 0; i < codes.size(); ++i) {
+        common_batch_add(batch, codes[i], i, { 0 }, true); // TODO: all logits?
+    }
+    GGML_ASSERT(batch.n_tokens == (int) codes.size());
+
+    if (llama_decode(ctx_cts, batch) != 0) {
+        LOG_ERR("%s: llama_decode() failed\n", __func__);
+        return 1;
+    }
+
+    llama_synchronize(ctx_cts);
+
+    LOG_INF("%s: time for vocoder: %.3f ms\n", __func__, (ggml_time_us() - t_voc_start) / 1000.0f);
+
+    const auto t_spec_start = ggml_time_us();
+
+    const int n_embd = llama_n_embd(model_cts);
+    const float * embd = llama_get_embeddings(ctx_cts);
+
+    // spectral operations
+    // TODO: not optimized at all
+    auto audio = embd_to_audio(embd, codes, n_embd, params.cpuparams.n_threads);
+
+    const std::string fname = "output.wav";
+
+    const int n_sr = 24000; // sampling rate
+
+    LOG_INF("%s: time for spectral ops: %.3f ms\n", __func__, (ggml_time_us() - t_spec_start) / 1000.0f);
+    LOG_INF("%s: total time: %.3f ms\n", __func__, (ggml_time_us() - t_main_start) / 1000.0f);
+
+    save_wav16(fname, audio, n_sr);
+
+    LOG_INF("%s: audio written to file '%s'\n", __func__, fname.c_str());
 
     llama_free(ctx_ttc);
     llama_free_model(model_ttc);
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 7c0159ab4..2bbe5f482 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -3846,10 +3846,6 @@ struct ggml_tensor * ggml_conv_1d(
         int d0) {
     struct ggml_tensor * im2col = ggml_im2col(ctx, a, b, s0, 0, p0, 0, d0, 0, false, GGML_TYPE_F16); // [N, OL, IC * K]
 
-    printf("a: %lld %lld %lld %lld\n", a->ne[0], a->ne[1], a->ne[2], a->ne[3]);
-    printf("b: %lld %lld %lld %lld\n", b->ne[0], b->ne[1], b->ne[2], b->ne[3]);
-    printf("im2col: %lld %lld %lld %lld\n", im2col->ne[0], im2col->ne[1], im2col->ne[2], im2col->ne[3]);
-
     struct ggml_tensor * result =
         ggml_mul_mat(ctx,
                 ggml_reshape_2d(ctx, im2col, im2col->ne[0], (im2col->ne[2] * im2col->ne[1])), // [N, OL, IC * K] => [N*OL, IC * K]
diff --git a/src/llama.cpp b/src/llama.cpp
index 07a00b2f6..6397decd7 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -17234,9 +17234,6 @@ struct llm_build_context {
 
         cur = ggml_cont(ctx0, ggml_transpose(ctx0, inpL));
 
-        printf("cur: %d %d %d\n", cur->ne[0], cur->ne[1], cur->ne[2]);
-        printf("conv1d: %d %d %d\n", model.conv_1d->ne[0], model.conv_1d->ne[1], model.conv_1d->ne[2]);
-
         cur = ggml_conv_1d_ph(ctx0, model.conv_1d, cur, 1, 1);
         cur = ggml_add(ctx0, cur, ggml_reshape_2d(ctx0, model.conv_1d_b, 1, model.conv_1d_b->ne[0]));
@@ -17445,8 +17442,6 @@ struct llm_build_context {
         cur = ggml_add(ctx0, cur, model.output_b);
         cb(cur, "result_embd", -1);
 
-        printf("cur: %d %d %d\n", cur->ne[0], cur->ne[1], cur->ne[2]);
-
         ggml_build_forward_expand(gf, cur);
 
         return gf;
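
Note on the WAV output path (reviewer note, not part of the patch): save_wav16() writes a minimal 44-byte RIFF/WAVE header followed by mono 16-bit PCM samples. The line `header.chunk_size = 36 + header.data_size;` follows from the RIFF layout: the chunk-size field counts everything after the first 8 bytes of the file, i.e. the remaining 36 header bytes plus the PCM payload. Below is a standalone sketch that exercises the same header layout end to end; it assumes only the C++17 standard library, and the file name "tone.wav" and the 440 Hz test tone are illustrative choices, not taken from the patch.

    // sketch: write one second of a 440 Hz sine tone as a mono 16-bit PCM WAV,
    // using the same 44-byte header layout as the wav_header struct above
    #include <algorithm>
    #include <cmath>
    #include <cstdint>
    #include <fstream>
    #include <vector>

    struct wav_header {
        char riff[4] = {'R', 'I', 'F', 'F'};
        uint32_t chunk_size;                  // 36 + data_size
        char wave[4] = {'W', 'A', 'V', 'E'};
        char fmt[4] = {'f', 'm', 't', ' '};
        uint32_t fmt_chunk_size = 16;
        uint16_t audio_format = 1;            // PCM
        uint16_t num_channels = 1;            // mono
        uint32_t sample_rate;
        uint32_t byte_rate;                   // sample_rate * block_align
        uint16_t block_align;                 // bytes per sample frame
        uint16_t bits_per_sample = 16;
        char data[4] = {'d', 'a', 't', 'a'};
        uint32_t data_size;                   // PCM payload size in bytes
    };

    int main() {
        const int sr = 24000; // same sampling rate the TTS example uses
        const double pi = 3.14159265358979323846;

        std::vector<float> samples(sr); // 1 second of audio
        for (size_t i = 0; i < samples.size(); ++i) {
            samples[i] = 0.5f * (float) std::sin(2.0*pi*440.0*i/sr);
        }

        wav_header header;
        header.sample_rate = sr;
        header.block_align = header.num_channels * (header.bits_per_sample / 8);
        header.byte_rate = header.sample_rate * header.block_align;
        header.data_size = samples.size() * (header.bits_per_sample / 8);
        header.chunk_size = 36 + header.data_size;

        std::ofstream file("tone.wav", std::ios::binary);
        file.write(reinterpret_cast<const char*>(&header), sizeof(header));
        for (float s : samples) {
            // clamp before narrowing, exactly as save_wav16() does
            const int16_t pcm = static_cast<int16_t>(std::clamp(s * 32767.0f, -32768.0f, 32767.0f));
            file.write(reinterpret_cast<const char*>(&pcm), sizeof(pcm));
        }
        return 0;
    }

Writing the struct with a single file.write() is only valid because wav_header is naturally packed to exactly 44 bytes with these member sizes and this ordering on common ABIs; the patch relies on the same property. The output additionally assumes a little-endian host, since WAV stores its multi-byte fields little-endian.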