diff --git a/ggml.c b/ggml.c
index b96a82a41..cc403d1a2 100644
--- a/ggml.c
+++ b/ggml.c
@@ -23014,4 +23014,12 @@ int ggml_cpu_has_matmul_int8(void) {
 #endif
 }
 
+int ggml_cpu_is_xeonphi_knc(void) {
+#if defined(__k1om__)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/ggml.h b/ggml.h
index 3fe95ed57..7558af95f 100644
--- a/ggml.h
+++ b/ggml.h
@@ -2394,6 +2394,7 @@ extern "C" {
     GGML_API int ggml_cpu_has_sycl       (void);
     GGML_API int ggml_cpu_has_vsx       (void);
     GGML_API int ggml_cpu_has_matmul_int8(void);
+    GGML_API int ggml_cpu_is_xeonphi_knc (void);
 
     //
     // Internal types and functions exposed for tests and benchmarks
diff --git a/llama.cpp b/llama.cpp
index 202bf94c8..407d9816e 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -17998,6 +17998,7 @@ const char * llama_print_system_info(void) {
 #else
     s += "LLAMAFILE = 0 | ";
 #endif
+    s += "XEONPHI_KNC = " + std::to_string(ggml_cpu_is_xeonphi_knc()) + " | ";
 
     return s.c_str();
 }