llama : make tensor_split ptr instead of array (#2272)
This commit is contained in:
parent
54e3bc76fe
commit
ae178ab46b
4 changed files with 8 additions and 4 deletions
|
@@ -849,7 +849,7 @@ struct llama_context_params llama_context_default_params() {
         /*.n_batch                    =*/ 512,
         /*.gpu_layers                 =*/ 0,
         /*.main_gpu                   =*/ 0,
-        /*.tensor_split               =*/ {0},
+        /*.tensor_split               =*/ nullptr,
         /*.rope_freq_base             =*/ 10000.0f,
         /*.rope_freq_scale            =*/ 1.0f,
         /*.progress_callback          =*/ nullptr,
@@ -1289,7 +1289,7 @@ static bool llama_model_load(
         int n_batch,
         int n_gpu_layers,
         int main_gpu,
-        float * tensor_split,
+        const float * tensor_split,
         float rope_freq_base,
         float rope_freq_scale,
         bool low_vram,
|
Loading…
Add table
Add a link
Reference in a new issue