flake.nix: avoid re-evaluating nixpkgs too many times

This commit is contained in:
Someone Serge 2023-12-25 15:27:58 +00:00
parent c8d39878d9
commit 4b12b0579f
No known key found for this signature in database
GPG key ID: 7B0E3B1390D61DA4
3 changed files with 39 additions and 23 deletions

View file

@@ -1,5 +0,0 @@
final: prev:
{
llama-cpp = final.callPackage ./package.nix { };
}

3
.devops/nix/scope.nix Normal file
View file

@@ -0,0 +1,3 @@
{ lib, newScope }:
lib.makeScope newScope (self: { llama-cpp = self.callPackage ./package.nix { }; })

View file

@@ -17,23 +17,42 @@
in in
{ {
# These define the various ways to build the llama.cpp project. # An overlay can be used to have a more granular control over llama-cpp's
# Integrate them into your flake.nix configuration by adding this overlay to nixpkgs.overlays. # dependencies and configuration, than that offered by the `.override`
overlays.default = import ./.devops/nix/overlay.nix; # mechanism. Cf. https://nixos.org/manual/nixpkgs/stable/#chap-overlays.
#
# E.g. in a flake:
# ```
# { nixpkgs, llama-cpp, ... }:
# let pkgs = import nixpkgs {
# overlays = [ (llama-cpp.overlays.default) ];
# system = "aarch64-linux";
# config.allowUnfree = true;
# config.cudaSupport = true;
# config.cudaCapabilities = [ "7.2" ];
# config.cudaEnableForwardCompat = false;
# }; in {
# packages.aarch64-linux.llamaJetsonXavier = pkgs.llamaPackages.llama-cpp;
# }
# ```
#
# Cf. https://nixos.org/manual/nix/unstable/command-ref/new-cli/nix3-flake.html?highlight=flake#flake-format
overlays.default = (final: prev: { llamaPackages = final.callPackage .devops/nix/scope.nix { }; });
# These use the package definition from `./.devops/nix/package.nix`. # These use the package definition from `./.devops/nix/package.nix`.
# There's one per backend that llama-cpp uses. Add more as needed! # There's one per backend that llama-cpp uses. Add more as needed!
packages = eachSystem ( packages = eachSystem (
system: system:
let let
defaultConfig = { # Avoid re-evaluation for the nixpkgs instance,
inherit system; # cf. https://zimbatm.com/notes/1000-instances-of-nixpkgs
overlays = [ self.overlays.default ]; pkgs = nixpkgs.legacyPackages.${system};
};
pkgs = import nixpkgs defaultConfig; # Ensure dependencies use CUDA consistently (e.g. that openmpi, ucc,
# and ucx are built with CUDA support)
pkgsCuda = import nixpkgs {
inherit system;
# Let's not make a big deal about getting the CUDA bits.
cudaConfig = defaultConfig // {
config.cudaSupport = true; config.cudaSupport = true;
config.allowUnfreePredicate = config.allowUnfreePredicate =
p: p:
@@ -48,19 +67,18 @@
) )
(p.meta.licenses or [ p.meta.license ]); (p.meta.licenses or [ p.meta.license ]);
}; };
pkgsCuda = import nixpkgs cudaConfig;
# Let's make sure to turn on ROCm support across the whole package ecosystem. # Ensure dependencies use ROCm consistently
rocmConfig = defaultConfig // { pkgsRocm = import nixpkgs {
inherit system;
config.rocmSupport = true; config.rocmSupport = true;
}; };
pkgsRocm = import nixpkgs rocmConfig;
in in
{ {
default = pkgs.llama-cpp; default = (pkgs.callPackage .devops/nix/scope.nix { }).llama-cpp;
opencl = pkgs.llama-cpp.override { useOpenCL = true; }; opencl = self.packages.${system}.default.override { useOpenCL = true; };
cuda = pkgsCuda.llama-cpp; cuda = (pkgsCuda.callPackage .devops/nix/scope.nix { }).llama-cpp;
rocm = pkgsRocm.llama-cpp; rocm = (pkgsRocm.callPackage .devops/nix/scope.nix { }).llama-cpp;
} }
); );