cont : fix mmap flag print (#11699)
parent 4d3465c5ae
commit bdcf8b6a56
2 changed files with 1 addition and 2 deletions
@@ -1275,7 +1275,7 @@ bool llama_model::load_tensors(llama_model_loader & ml) {
 
     const bool use_mmap_buffer = true;
 
-    LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, use_mmap_buffer ? "true" : "false");
+    LLAMA_LOG_INFO("%s: loading model tensors, this can take a while... (mmap = %s)\n", __func__, ml.use_mmap ? "true" : "false");
 
     // build a list of buffer types for the CPU and GPU devices
     pimpl->cpu_buft_list = make_cpu_buft_list(devices);
@@ -9430,7 +9430,6 @@ static struct llama_model * llama_model_load_from_file_impl(
         struct llama_model_params params) {
     ggml_time_init();
 
-
     unsigned cur_percentage = 0;
     if (params.progress_callback == NULL) {
         params.progress_callback_user_data = &cur_percentage;
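For context, the first hunk is the actual fix: the log line previously keyed off `use_mmap_buffer`, a local constant that is always `true` in `load_tensors`, so the message reported `mmap = true` even when memory mapping was disabled (e.g. via `--no-mmap`). `ml.use_mmap` is the loader's real setting. Below is a minimal standalone sketch of that difference; `fake_loader` is a hypothetical stand-in for `llama_model_loader`, not the real type.

    // Sketch only: fake_loader stands in for llama_model_loader's use_mmap flag.
    #include <cstdio>

    struct fake_loader {
        bool use_mmap; // whether memory-mapped loading was actually requested
    };

    int main() {
        fake_loader ml{ /*use_mmap=*/false };  // user asked to disable mmap
        const bool use_mmap_buffer = true;     // local constant, always true

        // Old print: always reports "mmap = true", regardless of the request.
        std::printf("old: mmap = %s\n", use_mmap_buffer ? "true" : "false");

        // Fixed print: reports the loader's actual mmap setting.
        std::printf("new: mmap = %s\n", ml.use_mmap ? "true" : "false");
        return 0;
    }

The second hunk only drops a stray blank line in `llama_model_load_from_file_impl`.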