llama : fix platforms without mmap (#4578)
* llama : fix platforms without mmap

* win32 : limit prefetch size to the file size

* fix win32 error clobber, unnecessary std::string in std::runtime_error
parent 48b24b170e
commit 48b7ff193e

3 changed files with 24 additions and 21 deletions
ggml.c | 6
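Only the ggml.c hunks survive in this extract; the win32 prefetch and error-handling fixes named in the commit message live in the other changed files and are not shown here. As an illustration of the prefetch fix's idea, capping a prefetch request at the file size is a simple clamp; the helper below is a hypothetical sketch, not the actual llama.cpp code:

#include <stddef.h>

// Hypothetical sketch: cap a prefetch request so the byte range handed to the
// OS never extends past the end of the mapped file.
static size_t clamp_prefetch(size_t prefetch_bytes, size_t file_size) {
    return prefetch_bytes < file_size ? prefetch_bytes : file_size;
}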
@@ -10335,7 +10335,8 @@ static void ggml_compute_forward_scale_f32(
     }

     // scale factor
-    const float v = *(float *) dst->op_params;
+    float v;
+    memcpy(&v, dst->op_params, sizeof(float));

     const int ith = params->ith;
     const int nth = params->nth;
@@ -15152,7 +15153,8 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
             {
                 // necessary for llama
                 if (src0->grad) {
-                    const float s = ((float *) tensor->op_params)[0];
+                    float s;
+                    memcpy(&s, tensor->op_params, sizeof(float));

                     src0->grad =
                         ggml_add_or_set(ctx,
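Both hunks apply the same fix: rather than type-punning op_params through a float pointer, the scale factor is copied into a local with memcpy, which is well-defined regardless of aliasing or alignment concerns. A minimal standalone sketch of that pattern, where fake_tensor and read_scale_param are hypothetical stand-ins for the real ggml structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Hypothetical stand-in for a ggml tensor's op_params: a raw int32_t array
// whose first slot holds the bytes of a float scale factor.
typedef struct {
    int32_t op_params[16];
} fake_tensor;

// Read the scale factor without dereferencing op_params as a float pointer;
// copying the bytes with memcpy is valid even where the cast would not be.
static float read_scale_param(const fake_tensor * t) {
    float v;
    memcpy(&v, t->op_params, sizeof(float));
    return v;
}

int main(void) {
    fake_tensor t = {0};
    const float scale = 0.125f;
    memcpy(t.op_params, &scale, sizeof(float)); // store the parameter bytes

    printf("scale = %f\n", read_scale_param(&t)); // prints 0.125000
    return 0;
}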