- Removed commented-out old code left over from the fix

- Updated another instance of the same issue below the original
This commit is contained in:
l3utterfly 2023-06-19 18:23:25 +08:00
parent ced8e8d40d
commit 2f9366be4a

View file

@@ -3122,8 +3122,6 @@ size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dst) {
     if (kv_size) {
         const size_t elt_size = ggml_element_size(kv_self.k);
-        //char buffer[4096];
         ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
         ggml_cgraph gf{};
         gf.n_threads = 1;
@@ -3230,9 +3228,7 @@ size_t llama_set_state_data(struct llama_context * ctx, uint8_t * src) {
         const size_t elt_size = ggml_element_size(kv_self.k);
-        char buffer[4096];
-        ggml_context * cpy_ctx = ggml_init({ sizeof(buffer), buffer, /* no_alloc */ true });
+        ggml_context * cpy_ctx = ggml_init({ 4096, NULL, /* no_alloc */ true });
         ggml_cgraph gf{};
         gf.n_threads = 1;