Double CLBlast speed by disabling OpenBLAS thread workaround

Co-authored-by: Concedo <39025047+LostRuins@users.noreply.github.com>
Co-authored-by: slaren <2141330+slaren@users.noreply.github.com>
parent 309af7fce9
commit f469d9afa0

3 changed files with 15 additions and 2 deletions

ggml.c (12 changes)

@@ -12354,6 +12354,18 @@ int ggml_cpu_has_cublas(void) {
 #endif
 }
 
+int ggml_cpu_has_clblast(void) {
+#if defined(GGML_USE_CLBLAST)
+    return 1;
+#else
+    return 0;
+#endif
+}
+
+int ggml_cpu_has_gpublas(void) {
+    return ggml_cpu_has_cublas() || ggml_cpu_has_clblast();
+}
+
 int ggml_cpu_has_sse3(void) {
 #if defined(__SSE3__)
     return 1;
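
For context only (not part of the patch): the new helpers follow the existing ggml_cpu_has_* pattern, so a caller can probe at runtime which backends a given build was compiled with. A minimal sketch, assuming it is compiled and linked against a ggml build that contains the hunk above; the file name cap_check.c is purely illustrative:

    /* cap_check.c - illustrative sketch, not part of this commit.
     * Prints which BLAS-style backends this ggml build was compiled with. */
    #include <stdio.h>
    #include "ggml.h"

    int main(void) {
        printf("blas:    %d\n", ggml_cpu_has_blas());    /* any BLAS backend compiled in */
        printf("cublas:  %d\n", ggml_cpu_has_cublas());  /* NVIDIA cuBLAS */
        printf("clblast: %d\n", ggml_cpu_has_clblast()); /* added in the ggml.c hunk above */
        printf("gpublas: %d\n", ggml_cpu_has_gpublas()); /* cuBLAS or CLBlast */
        return 0;
    }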

ggml.h (3 changes)

@@ -852,10 +852,11 @@ extern "C" {
     GGML_API int ggml_cpu_has_wasm_simd (void);
     GGML_API int ggml_cpu_has_blas      (void);
     GGML_API int ggml_cpu_has_cublas    (void);
+    GGML_API int ggml_cpu_has_clblast   (void);
+    GGML_API int ggml_cpu_has_gpublas   (void);
     GGML_API int ggml_cpu_has_sse3      (void);
     GGML_API int ggml_cpu_has_vsx       (void);
 
     //
     // Internal types and functions exposed for tests and benchmarks
     //

llama.cpp (2 changes)

@@ -1076,7 +1076,7 @@ static bool llama_eval_internal(
     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
     ggml_cgraph gf = {};
-    gf.n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_cublas() ? 1 : n_threads;
+    gf.n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
 
     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, tokens, N*ggml_element_size(embd));
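
The single changed line above is the whole fix: in a CLBlast build ggml_cpu_has_blas() reports 1 (CLBlast counts as a BLAS backend) while ggml_cpu_has_cublas() reports 0, so the old condition forced large-prompt evaluation down to one CPU thread even though the matrix multiplications are offloaded to the GPU. A minimal sketch of the decision, using a hypothetical pick_n_threads() helper for illustration only (not a function in llama.cpp):

    /* Illustration only: pick_n_threads() is a hypothetical helper, not part of llama.cpp. */
    #include "ggml.h"

    static int pick_n_threads(int n_tokens, int n_threads) {
        /* Large prompt with a CPU-side BLAS (e.g. OpenBLAS): run the graph single-threaded
         * so the worker threads do not spin-lock while BLAS does the matrix multiplications. */
        if (n_tokens >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
            return 1;
        }
        /* No BLAS, or a GPU BLAS (cuBLAS/CLBlast): keep the requested thread count.
         * Before this commit the exemption covered only cuBLAS, so CLBlast builds fell
         * into the single-thread branch above. */
        return n_threads;
    }

Restoring the full thread count for CLBlast builds is what the speedup in the commit title refers to.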