llama : add benchmark example (#2626)
* llama : add benchmark example
* add to examples CMakeLists.txt
* fix msvc build
* add missing include
* add Bessel's correction to stdev calculation

Co-authored-by: Johannes Gäßler <johannesg@5d6.de>

* improve markdown formatting
* add missing include
* print warning if NDEBUG is not defined
* remove n_prompt and n_gen from the matrix, use each value separately instead
* better checks for non-optimized builds
* llama.cpp : fix MEM_REQ_SCRATCH0 reusing the value of n_ctx of the first call
* fix json formatting
* add sql output
* add basic cpu and gpu info (linux/cuda only)
* markdown: also show values that differ from the default
* markdown: add build id
* cleanup
* improve formatting
* formatting

---------

Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
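One of the items above adds Bessel's correction to the stdev calculation, i.e. the sample standard deviation divides by n - 1 instead of n. As a rough illustration of that fix (a standalone sketch, not the benchmark example's actual helper):

// Sample standard deviation with Bessel's correction (divide by n - 1).
// Illustrative only; not the code added by this commit.
#include <cmath>
#include <cstdio>
#include <vector>

static double stdev(const std::vector<double> & v) {
    if (v.size() < 2) {
        return 0.0;
    }
    double mean = 0.0;
    for (double x : v) {
        mean += x;
    }
    mean /= v.size();

    double sq_sum = 0.0;
    for (double x : v) {
        sq_sum += (x - mean) * (x - mean);
    }
    // Bessel's correction: divide by (n - 1) for an unbiased sample variance.
    return sqrt(sq_sum / (v.size() - 1));
}

int main() {
    std::vector<double> ts = {101.2, 98.7, 100.4, 99.9}; // e.g. tokens/s per run
    printf("stdev = %f\n", stdev(ts));
    return 0;
}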
This commit is contained in:
parent eaf98c2649
commit 097e121e2f
9 changed files with 1029 additions and 28 deletions
ggml-cuda.cu | 12
@@ -6469,3 +6469,15 @@ bool ggml_cuda_compute_forward(struct ggml_compute_params * params, struct ggml_
     func(tensor->src[0], tensor->src[1], tensor);
     return true;
 }
+
+int ggml_cuda_get_device_count() {
+    int device_count;
+    CUDA_CHECK(cudaGetDeviceCount(&device_count));
+    return device_count;
+}
+
+void ggml_cuda_get_device_description(int device, char * description, size_t description_size) {
+    cudaDeviceProp prop;
+    CUDA_CHECK(cudaGetDeviceProperties(&prop, device));
+    snprintf(description, description_size, "%s", prop.name);
+}
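The two helpers added above expose basic CUDA device info for the benchmark's "cpu and gpu info" output. A minimal caller could look like this sketch (it assumes a CUDA build and that both functions are declared in ggml-cuda.h; this is not the benchmark example's actual code):

// Sketch: enumerate CUDA devices via the helpers added in this commit.
#include <stdio.h>
#include "ggml-cuda.h"

int main(void) {
    int n = ggml_cuda_get_device_count();
    for (int i = 0; i < n; i++) {
        char desc[128];
        ggml_cuda_get_device_description(i, desc, sizeof(desc));
        printf("CUDA device %d: %s\n", i, desc);
    }
    return 0;
}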