do not cast to size_t, instead just use doubles

Concedo 2023-12-14 16:43:34 +08:00
parent 1ad8f0d80e
commit 05f7db4b29
3 changed files with 4 additions and 4 deletions

ggml.c (4 changed lines)

@@ -2011,8 +2011,8 @@ size_t ggml_type_size(enum ggml_type type) {
     return type_traits[type].type_size;
 }
 
-float ggml_type_sizef(enum ggml_type type) {
-    return ((float)(type_traits[type].type_size))/type_traits[type].blck_size;
+double ggml_type_sizef(enum ggml_type type) {
+    return ((double)(type_traits[type].type_size))/type_traits[type].blck_size;
 }
 
 const char * ggml_type_name(enum ggml_type type) {
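For illustration: ggml_type_sizef gives the average number of bytes per element, which is fractional for quantized block types, so an integer type cannot hold it without losing information. A minimal sketch of the computation, assuming ggml's Q8_0 layout of 34 bytes per 32-element block (a 2-byte fp16 scale plus 32 int8 quants):

#include <stdio.h>

int main(void) {
    // Assumed Q8_0 layout: sizeof(block_q8_0) == 34, QK8_0 == 32.
    const double type_size = 34.0; // bytes per block
    const double blck_size = 32.0; // elements per block

    // The per-element size is fractional (1.0625 bytes), which is why
    // ggml_type_sizef returns a floating-point value rather than size_t.
    printf("bytes per element: %f\n", type_size / blck_size);
    return 0;
}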

ggml.h (2 changed lines)

@@ -643,7 +643,7 @@ extern "C" {
     GGML_API int    ggml_blck_size (enum ggml_type type);
     GGML_API size_t ggml_type_size (enum ggml_type type); // size in bytes for all elements in a block
-    GGML_API float  ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float
+    GGML_API double ggml_type_sizef(enum ggml_type type); // ggml_type_size()/ggml_blck_size() as float
 
     GGML_API const char * ggml_type_name(enum ggml_type type);
     GGML_API const char * ggml_op_name  (enum ggml_op   op);

llama.cpp (2 changed lines)

@@ -1555,7 +1555,7 @@ static bool llama_kv_cache_init(
     cache.cells.clear();
     cache.cells.resize(n_ctx);
 
-    cache.buf.resize(n_elements*((size_t)(ggml_type_sizef(ktype) + ggml_type_sizef(vtype))) + 2u*n_layer*ggml_tensor_overhead());
+    cache.buf.resize(n_elements*(ggml_type_sizef(ktype) + ggml_type_sizef(vtype)) + 2u*n_layer*ggml_tensor_overhead());
     memset(cache.buf.data, 0, cache.buf.size);
 
     struct ggml_init_params params;
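The practical effect of dropping the cast is where truncation to a whole number of bytes happens. A rough sketch of the difference, using hypothetical values: a quantized K cache at 1.0625 bytes per element (the assumed Q8_0 figure above) and an F16 V cache at 2 bytes per element:

#include <stdio.h>
#include <stddef.h>

int main(void) {
    const size_t n_elements = 1000000;     // hypothetical KV-cache element count
    const double k_sizef    = 34.0 / 32.0; // assumed Q8_0: 1.0625 bytes per element
    const double v_sizef    = 2.0;         // F16: 2 bytes per element

    // Previous form: the fractional per-element sum is cast to size_t first,
    // so (size_t)(1.0625 + 2.0) == 3 and the fractional part is lost for
    // every element, undersizing the buffer.
    size_t old_size = n_elements*((size_t)(k_sizef + v_sizef));

    // New form: compute in double and truncate only the final total,
    // losing less than one byte overall.
    size_t new_size = (size_t)(n_elements*(k_sizef + v_sizef));

    printf("old: %zu bytes, new: %zu bytes\n", old_size, new_size); // 3000000 vs 3062500
    return 0;
}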