flake : use even smaller version of torch
parent 49b89d1682
commit 880780080b
1 changed file with 1 addition and 1 deletion

@@ -53,7 +53,7 @@
         pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]);
       # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime
       llama-python-extra =
-        pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece torch transformers ]);
+        pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece torchWithoutCuda transformers ]);
       postPatch = ''
         substituteInPlace ./ggml-metal.m \
           --replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
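
The change swaps the full `torch` package for nixpkgs' `torchWithoutCuda`, the CPU-only PyTorch build, so the optional `llama-python-extra` environment no longer pulls the CUDA toolchain into its closure. As a minimal sketch (not part of this commit; the `devShells.extra` name and the `mkShell` wiring are assumptions), such an environment could be exposed as an opt-in shell:

    # Sketch only: the attribute name "extra" is hypothetical; the commit
    # itself only replaces torch with torchWithoutCuda in llama-python-extra.
    devShells.extra = pkgs.mkShell {
      packages = [
        (pkgs.python3.withPackages
          (ps: with ps; [ numpy sentencepiece torchWithoutCuda transformers ]))
      ];
    };

Entering such a shell (e.g. with `nix develop .#extra`) would then build the CPU-only torch variant instead of the much larger CUDA-enabled one.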