From 397b3399cb92d55d05c8fcfc94ab961b709fd78f Mon Sep 17 00:00:00 2001
From: Cebtenzzre
Date: Wed, 30 Aug 2023 20:05:05 -0400
Subject: [PATCH] fix more MinGW warnings

---
 common/console.cpp                   | 1 +
 examples/beam-search/beam-search.cpp | 8 +++++---
 examples/server/server.cpp           | 6 ++++--
 3 files changed, 10 insertions(+), 5 deletions(-)

diff --git a/common/console.cpp b/common/console.cpp
index 8efa2a674..23545e5be 100644
--- a/common/console.cpp
+++ b/common/console.cpp
@@ -235,6 +235,7 @@ namespace console {
 
     int estimateWidth(char32_t codepoint) {
 #if defined(_WIN32)
+        (void)codepoint;
         return 1;
 #else
         return wcwidth(codepoint);
diff --git a/examples/beam-search/beam-search.cpp b/examples/beam-search/beam-search.cpp
index 42c7c7254..4d021434b 100644
--- a/examples/beam-search/beam-search.cpp
+++ b/examples/beam-search/beam-search.cpp
@@ -22,7 +22,9 @@
 #include <unistd.h>
 #elif defined (_WIN32)
 #define WIN32_LEAN_AND_MEAN
-#define NOMINMAX
+#ifndef NOMINMAX
+#   define NOMINMAX
+#endif
 #include <windows.h>
 #include <signal.h>
 #endif
@@ -73,7 +75,7 @@ void beam_search_callback(void * callback_data_ptr, llama_beams_state beams_stat
         assert(0u < beams_state.n_beams);
         const llama_token * tokens = beams_state.beam_views[0].tokens;
         std::copy(tokens, tokens + n, callback_data.response.end() - n);
-        printf("%lu", n);
+        printf("%zu", n);
     }
     fflush(stdout);
 #if 1 // DEBUG: print current beams for this iteration
@@ -145,7 +147,7 @@ int main(int argc, char ** argv)
 
     if (tokens_list.size() > max_tokens_list_size)
     {
-        fprintf( stderr , "%s: error: prompt too long (%lu tokens, max %lu)\n" ,
+        fprintf( stderr , "%s: error: prompt too long (%zu tokens, max %zu)\n" ,
             __func__ , tokens_list.size() , max_tokens_list_size );
         return 1;
     }
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 43771b025..09eac2ec2 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -17,6 +17,8 @@
 #include "completion.js.hpp"
 #include "json-schema-to-grammar.mjs.hpp"
 
+#include <cstddef>
+
 #ifndef SERVER_VERBOSE
 #define SERVER_VERBOSE 1
 #endif
@@ -1038,7 +1040,7 @@ static json format_timings(llama_server_context &llama)
 {
     const auto timings = llama_get_timings(llama.ctx);
 
-    assert(timings.n_eval == llama.num_tokens_predicted);
+    assert(timings.n_eval == ptrdiff_t(llama.num_tokens_predicted));
 
     return json{
         {"prompt_n", timings.n_p_eval},
@@ -1239,7 +1241,7 @@ void beam_search_callback(void * callback_data, llama_beams_state beams_state) {
         const llama_token * tokens = beams_state.beam_views[0].tokens;
         const auto map = [](llama_token tok) { return completion_token_output{{},tok}; };
         std::transform(tokens, tokens + n, llama.generated_token_probs.end() - n, map);
-        printf("%lu", n);
+        printf("%zu", n);
     }
     fflush(stdout);
 #if 0 // DEBUG: print current beams for this iteration
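
Note on the warning-silencing patterns used above (not part of the patch): all three are standard portable-C++ idioms. Casting an unused parameter to void silences -Wunused-parameter on platforms where the parameter is not read; wrapping a #define in #ifndef avoids a macro-redefinition warning when the build system already defines it; and %zu is the correct printf length modifier for size_t, since %lu assumes unsigned long, which is only 32 bits on 64-bit Windows/MinGW. A minimal standalone sketch of the same idioms follows; the function name estimate_width is illustrative, not from the patch.

    #include <cstdio>   // printf, size_t

    // Guard the definition: MinGW builds may already define NOMINMAX
    // on the command line, and an unconditional #define would warn.
    #ifndef NOMINMAX
    #   define NOMINMAX
    #endif

    // Explicitly discard a parameter that is only used on some
    // platforms, silencing -Wunused-parameter on the others.
    int estimate_width(char32_t codepoint) {
        (void)codepoint;
        return 1;
    }

    int main() {
        // %zu matches size_t on every platform; %lu is undefined
        // behavior wherever sizeof(unsigned long) != sizeof(size_t),
        // as on 64-bit Windows.
        const size_t n = 42;
        printf("%zu\n", n);
        return estimate_width(U'x') - 1;  // returns 0
    }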