diff --git a/ggml.c b/ggml.c
index 9a7bd1d8c..d7cfe3a26 100644
--- a/ggml.c
+++ b/ggml.c
@@ -21590,4 +21590,12 @@ int ggml_cpu_has_matmul_int8(void) {
 #endif
 }
 
+int ggml_cpu_is_xeonphi_knc(void) {
+#if defined(__k1om__)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/ggml.h b/ggml.h
index 1171088a9..0024bbc7a 100644
--- a/ggml.h
+++ b/ggml.h
@@ -2358,6 +2358,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_sycl       (void);
     GGML_API int ggml_cpu_has_vsx        (void);
     GGML_API int ggml_cpu_has_matmul_int8(void);
+    GGML_API int ggml_cpu_is_xeonphi_knc (void);
 
     //
     // Internal types and functions exposed for tests and benchmarks
diff --git a/llama.cpp b/llama.cpp
index ad7b7b7d4..2b0ee2922 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -14229,6 +14229,7 @@ const char * llama_print_system_info(void) {
     s += "SSSE3 = " + std::to_string(ggml_cpu_has_ssse3()) + " | ";
     s += "VSX = " + std::to_string(ggml_cpu_has_vsx()) + " | ";
     s += "MATMUL_INT8 = " + std::to_string(ggml_cpu_has_matmul_int8()) + " | ";
+    s += "XEONPHI_KNC = " + std::to_string(ggml_cpu_is_xeonphi_knc()) + " | ";
 
     return s.c_str();
 }