Fix issue with alloc causing max_compute_size to be calculated incorrectly
This commit is contained in:
parent
57e2a7a52a
commit
f783c5971f
1 changed file with 5 additions and 7 deletions
|
@@ -1077,7 +1077,6 @@ int main(int argc, char ** argv) {
|
||||||
std::vector<uint8_t> mem_input_data;
|
std::vector<uint8_t> mem_input_data;
|
||||||
std::vector<uint8_t> mem_compute_data;
|
std::vector<uint8_t> mem_compute_data;
|
||||||
|
|
||||||
ggml_allocr * alloc = NULL;
|
|
||||||
|
|
||||||
// context for input tensors without their data
|
// context for input tensors without their data
|
||||||
struct ggml_init_params ctx_input_params = {
|
struct ggml_init_params ctx_input_params = {
|
||||||
|
@@ -1099,10 +1098,9 @@ int main(int argc, char ** argv) {
|
||||||
|
|
||||||
// allocate input tensors
|
// allocate input tensors
|
||||||
mem_input_data.resize(max_input_size);
|
mem_input_data.resize(max_input_size);
|
||||||
alloc = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment);
|
ggml_allocr_t alloc_inps = ggml_allocr_new(mem_input_data.data(), mem_input_data.size(), tensor_alignment);
|
||||||
ggml_allocr_alloc(alloc, tokens_input);
|
ggml_allocr_alloc(alloc_inps, tokens_input);
|
||||||
ggml_allocr_alloc(alloc, target_probs);
|
ggml_allocr_alloc(alloc_inps, target_probs);
|
||||||
ggml_allocr_free(alloc);
|
|
||||||
|
|
||||||
// context for compute tensors without their data
|
// context for compute tensors without their data
|
||||||
const size_t estimated_compute_size_wo_data = (
|
const size_t estimated_compute_size_wo_data = (
|
||||||
|
@@ -1129,7 +1127,7 @@ int main(int argc, char ** argv) {
|
||||||
// find best evaluation order
|
// find best evaluation order
|
||||||
for (unsigned order = 0; order < (unsigned) GGML_CGRAPH_EVAL_ORDER_COUNT; ++order) {
|
for (unsigned order = 0; order < (unsigned) GGML_CGRAPH_EVAL_ORDER_COUNT; ++order) {
|
||||||
ctx_compute = ggml_init(ctx_compute_params);
|
ctx_compute = ggml_init(ctx_compute_params);
|
||||||
alloc = ggml_allocr_new_measure(tensor_alignment);
|
ggml_allocr_t alloc = ggml_allocr_new_measure(tensor_alignment);
|
||||||
gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
|
gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
|
||||||
gf->order = (enum ggml_cgraph_eval_order) order;
|
gf->order = (enum ggml_cgraph_eval_order) order;
|
||||||
gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
|
gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
|
||||||
|
@@ -1162,7 +1160,7 @@ int main(int argc, char ** argv) {
|
||||||
// allocate compute tensors
|
// allocate compute tensors
|
||||||
mem_compute_data.resize(max_compute_size);
|
mem_compute_data.resize(max_compute_size);
|
||||||
ctx_compute = ggml_init(ctx_compute_params);
|
ctx_compute = ggml_init(ctx_compute_params);
|
||||||
alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment);
|
ggml_allocr_t alloc = ggml_allocr_new(mem_compute_data.data(), mem_compute_data.size(), tensor_alignment);
|
||||||
gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
|
gf = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
|
||||||
gf->order = best_order;
|
gf->order = best_order;
|
||||||
gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
|
gb = ggml_new_graph_custom(ctx_compute, LLAMA_TRAIN_MAX_NODES, true);
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue