diff --git a/common/grammar-parser.cpp b/common/grammar-parser.cpp
index e05d0f8aa..5a545a807 100644
--- a/common/grammar-parser.cpp
+++ b/common/grammar-parser.cpp
@@ -9,7 +9,7 @@
 namespace grammar_parser {
     // NOTE: assumes valid utf8 (but checks for overrun)
     // copied from llama.cpp
-    static auto decode_utf8(const char * src) -> std::pair<uint32_t, const char *> {
+    static std::pair<uint32_t, const char *> decode_utf8(const char * src) {
         static const int lookup[] = { 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 3, 4 };
         uint8_t  first_byte = static_cast<uint8_t>(*src);
         uint8_t  highbits = first_byte >> 4;
@@ -50,7 +50,7 @@ namespace grammar_parser {
         return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '-' || ('0' <= c && c <= '9');
     }
 
-    static auto parse_hex(const char * src, int size) -> std::pair<uint32_t, const char *> {
+    static std::pair<uint32_t, const char *> parse_hex(const char * src, int size) {
         const char * pos   = src;
         const char * end   = src + size;
         uint32_t     value = 0;
@@ -99,7 +99,7 @@ namespace grammar_parser {
         return pos;
     }
 
-    static auto parse_char(const char * src) -> std::pair<uint32_t, const char *> {
+    static std::pair<uint32_t, const char *> parse_char(const char * src) {
         if (*src == '\\') {
             switch (src[1]) {
                 case 'x': return parse_hex(src + 2, 2);
diff --git a/llama.cpp b/llama.cpp
index cdf7d88c4..374f22795 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -6819,9 +6819,9 @@ void llama_dump_timing_info_yaml(FILE * stream, const llama_context * ctx) {
 }
 
 // For internal test use
-auto llama_internal_get_tensor_map(struct llama_context * ctx)
-    -> const std::vector<std::pair<std::string, struct ggml_tensor *>> &
-{
+const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
+    struct llama_context * ctx
+) {
     return ctx->model.tensors_by_name;
 }
 
diff --git a/llama.h b/llama.h
index f494a83f1..c6ee038c7 100644
--- a/llama.h
+++ b/llama.h
@@ -540,8 +540,9 @@ extern "C" {
 
 struct ggml_tensor;
 
-auto llama_internal_get_tensor_map(struct llama_context * ctx)
-    -> const std::vector<std::pair<std::string, struct ggml_tensor *>> &;
+const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal_get_tensor_map(
+    struct llama_context * ctx
+);
 
 #endif // LLAMA_API_INTERNAL