From d8aa96496c40b8d3ef1b8af6d401660d411464f6 Mon Sep 17 00:00:00 2001
From: mike dupont
Date: Sat, 18 Nov 2023 08:55:36 -0500
Subject: [PATCH] now emit the json format

---
 examples/main/main.cpp |  2 +-
 ggml-cuda.cu           | 10 ++++------
 2 files changed, 5 insertions(+), 7 deletions(-)

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 5daafa39e..6f60a47a9 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -644,7 +644,7 @@ int main(int argc, char ** argv) {
         if (input_echo) {
             for (auto id : embd) {
                 const std::string token_str = llama_token_to_piece(ctx, id);
-                printf("%s", token_str.c_str());
+                printf("TOKEN:%s\n", token_str.c_str());
 
                 if (embd.size() > 1) {
                     input_tokens.push_back(id);
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 66eb0946c..504f7a06f 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -7593,12 +7593,10 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
 #endif
 
     // debug helpers
-    printf("src0: %8d %8d %8d %8d\n", src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3]);
-    printf(" %8d %8d %8d %8d\n", src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3]);
-    printf("src1: %8d %8d %8d %8d\n", src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3]);
-    printf(" %8d %8d %8d %8d\n", src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3]);
-    printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name);
-    printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);
+    printf("JSON { \"data\":{ \"src0\": { \"ne\" : [ %8d, %8d, %8d, %8d ], \"nb\" : [ %8d, %8d, %8d, %8d ], \"contiguous\":\"%d\", \"transposed\":\"%d\", \"type\": \"%s\", \"name\" : \"%s\"}, \"src1\": { \"ne\" : [ %8d, %8d, %8d, %8d ], \"nb\" : [ %8d, %8d, %8d, %8d ], \"contiguous\":\"%d\", \"transposed\":\"%d\", \"type\": \"%s\", \"name\" : \"%s\"}}}\n",
+           src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3],
+           ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name,
+           src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3], src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3], ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);
 
     if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
         // KQ single-batch