Fix editorconfig and unused variable

This commit is contained in:
slaren 2024-06-13 18:06:41 +02:00
parent c39d5ecd2b
commit d3131ce565

View file

@@ -3840,8 +3840,8 @@ struct llama_model_loader {
 std::vector<std::future<std::pair<ggml_tensor *, bool>>> validation_result;
 #if defined(GGML_USE_CUDA)
 // 4 staging buffers for async uploads, each sized 1MB seems to be a good default for single NVMe drives.
 // NVMe raid configurations might require more / larger buffers.
 constexpr size_t num_buffers = 4;
 constexpr size_t buffer_size = 1 * 1024 * 1024; // 1MB
@@ -3858,8 +3858,8 @@ struct llama_model_loader {
 if (buf) {
 ggml_backend_buffer_type_t buffer_type = ggml_backend_buffer_get_type(buf);
 for (int i = 0; i < ggml_backend_cuda_get_device_count(); ++i) {
-auto cuda_buffer_type = ggml_backend_cuda_buffer_type(i);
-if (buffer_type == ggml_backend_cuda_buffer_type(i)) {
+auto * cuda_buffer_type = ggml_backend_cuda_buffer_type(i);
+if (buffer_type == cuda_buffer_type) {
 cuda_backend = ggml_backend_cuda_init(i);
 break;
 }