style: format with nixfmt/rfc101-style

ditsuke 2024-02-22 23:55:55 +05:30
parent c6d4cb4655
commit 0126788271
No known key found for this signature in database
GPG key ID: 71B6C31C8A5A9D21
5 changed files with 213 additions and 224 deletions

View file

@@ -2,12 +2,9 @@
   perSystem =
     { config, lib, ... }:
     {
-      devShells =
-        lib.concatMapAttrs
-          (name: package: {
-            ${name} = package.passthru.shell;
-            ${name + "-extra"} = package.passthru.shell-extra;
-          })
-          config.packages;
+      devShells = lib.concatMapAttrs (name: package: {
+        ${name} = package.passthru.shell;
+        ${name + "-extra"} = package.passthru.shell-extra;
+      }) config.packages;
     };
 }
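Note: lib.concatMapAttrs (from the nixpkgs lib) maps a function over every name/value pair of an attribute set and merges the resulting sets, so the collapsed call keeps the old behaviour. A minimal sketch, assuming a hypothetical package set with a single entry named somePackage:

    lib.concatMapAttrs (name: package: {
      ${name} = package.passthru.shell;
      ${name + "-extra"} = package.passthru.shell-extra;
    }) { default = somePackage; }
    # => { default = somePackage.passthru.shell;
    #      "default-extra" = somePackage.passthru.shell-extra; }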

View file

@@ -26,16 +26,14 @@
       config.cudaSupport = true;
       config.allowUnfreePredicate =
         p:
-        builtins.all
-          (
-            license:
-            license.free
-            || builtins.elem license.shortName [
-              "CUDA EULA"
-              "cuDNN EULA"
-            ]
-          )
-          (p.meta.licenses or [ p.meta.license ]);
+        builtins.all (
+          license:
+          license.free
+          || builtins.elem license.shortName [
+            "CUDA EULA"
+            "cuDNN EULA"
+          ]
+        ) (p.meta.licenses or [ p.meta.license ]);
     };
     # Ensure dependencies use ROCm consistently
     pkgsRocm = import inputs.nixpkgs {
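Note: config.allowUnfreePredicate is consulted by nixpkgs for each unfree package, and this predicate accepts a package only when every entry of its license list is either free or one of the whitelisted CUDA EULAs; the `or` fallback covers packages that declare a single meta.license instead of a meta.licenses list. A rough sketch of the evaluation for a hypothetical CUDA package p:

    let p = { meta.license = { free = false; shortName = "CUDA EULA"; }; };
    in builtins.all (
      license:
      license.free || builtins.elem license.shortName [ "CUDA EULA" "cuDNN EULA" ]
    ) (p.meta.licenses or [ p.meta.license ])
    # => true, so this unfree package is allowed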

View file

@@ -20,12 +20,14 @@
   vulkan-loader,
   curl,
   shaderc,
-  useBlas ? builtins.all (x: !x) [
-    useCuda
-    useMetalKit
-    useRocm
-    useVulkan
-  ] && blas.meta.available,
+  useBlas ?
+    builtins.all (x: !x) [
+      useCuda
+      useMetalKit
+      useRocm
+      useVulkan
+    ]
+    && blas.meta.available,
   useCuda ? config.cudaSupport,
   useMetalKit ? stdenv.isAarch64 && stdenv.isDarwin,
   useMpi ? false, # Increases the runtime closure size by ~700M
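Note: builtins.all (x: !x) xs is true exactly when every element of xs is false, so the reformatted default still reads as: enable BLAS only when no accelerated backend (CUDA, MetalKit, ROCm, Vulkan) was requested and a BLAS implementation is available. Trivial sketch:

    builtins.all (x: !x) [ false false false false ]  # => true  (no backend selected)
    builtins.all (x: !x) [ true false false false ]   # => false (e.g. useCuda = true)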
@@ -38,7 +40,7 @@
   # otherwise we get libstdc++ errors downstream.
   effectiveStdenv ? if useCuda then cudaPackages.backendStdenv else stdenv,
   enableStatic ? effectiveStdenv.hostPlatform.isStatic,
-  precompileMetalShaders ? false
+  precompileMetalShaders ? false,
 }@inputs:

 let
@@ -63,9 +65,9 @@ let
   pnameSuffix =
     strings.optionalString (suffices != [ ])
       "-${strings.concatMapStringsSep "-" strings.toLower suffices}";
-  descriptionSuffix =
-    strings.optionalString (suffices != [ ])
-      ", accelerated with ${strings.concatStringsSep ", " suffices}";
+  descriptionSuffix = strings.optionalString (
+    suffices != [ ]
+  ) ", accelerated with ${strings.concatStringsSep ", " suffices}";

   executableSuffix = effectiveStdenv.hostPlatform.extensions.executable;
@@ -76,16 +78,13 @@ let
   #
   # TODO: Package up each Python script or service appropriately, by making
   # them into "entrypoints"
-  llama-python = python3.withPackages (
-    ps: [
+  llama-python = python3.withPackages (ps: [
     ps.numpy
     ps.sentencepiece
-    ]
-  );
+  ]);

   # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime
-  llama-python-extra = python3.withPackages (
-    ps: [
+  llama-python-extra = python3.withPackages (ps: [
     ps.numpy
     ps.sentencepiece
     ps.tiktoken
@@ -107,8 +106,7 @@ let
       # for scripts/compare-llama-bench.py
       ps.gitpython
       ps.tabulate
-    ]
-  );
+  ]);

   xcrunHost = runCommand "xcrunHost" { } ''
     mkdir -p $out/bin
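Note: python3.withPackages takes a selector over the matching Python package set and returns a Python environment containing the selected packages; only the placement of the ps: [ argument changes in these hunks. Minimal sketch:

    python3.withPackages (ps: [ ps.numpy ps.sentencepiece ])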
@@ -145,8 +143,7 @@ let
   ];
 in

-effectiveStdenv.mkDerivation (
-  finalAttrs: {
+effectiveStdenv.mkDerivation (finalAttrs: {
   pname = "llama-cpp${pnameSuffix}";
   version = llamaVersion;
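Note: the finalAttrs pattern passes mkDerivation a function rather than a plain attribute set, so attributes can refer to their final, override-aware values; the formatter only pulls the opening onto one line. A small sketch of the pattern, assuming stdenv, fetchurl and lib are in scope and using placeholder URL and hash:

    stdenv.mkDerivation (finalAttrs: {
      pname = "example";
      version = "1.2.3";
      src = fetchurl {
        # finalAttrs.version reflects later overrides, unlike a plain binding
        url = "https://example.org/example-${finalAttrs.version}.tar.gz";
        hash = lib.fakeHash; # placeholder
      };
    })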
@@ -193,13 +190,11 @@
     ]
     ++ optionals useCuda [
       cudaPackages.cuda_nvcc
       autoAddDriverRunpath
     ]
-    ++ optionals (effectiveStdenv.hostPlatform.isGnu && enableStatic) [
-      glibc.static
-    ] ++ optionals (effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders) [
-      xcrunHost
-    ];
+    ++ optionals (effectiveStdenv.hostPlatform.isGnu && enableStatic) [ glibc.static ]
+    ++ optionals (effectiveStdenv.isDarwin && useMetalKit && precompileMetalShaders) [ xcrunHost ];

   buildInputs =
     optionals effectiveStdenv.isDarwin darwinBuildInputs
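Note: optionals here is nixpkgs' lib.optionals, which returns the given list when the condition is true and [ ] otherwise, so collapsing the one-element lists onto a single line does not change which inputs end up in nativeBuildInputs. Sketch:

    optionals true [ glibc.static ]   # => [ glibc.static ]
    optionals false [ xcrunHost ]     # => [ ]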
@@ -318,5 +313,4 @@ effectiveStdenv.mkDerivation (
     # Extend `badPlatforms` instead
     platforms = lib.platforms.all;
   };
-}
-)
+})

View file

@@ -8,12 +8,10 @@
 # because it allows users to apply overlays later using `overrideScope'`.
 # Cf. https://noogle.dev/f/lib/makeScope

-lib.makeScope newScope (
-  self: {
+lib.makeScope newScope (self: {
   inherit llamaVersion;
   llama-cpp = self.callPackage ./package.nix { };
   docker = self.callPackage ./docker.nix { };
   docker-min = self.callPackage ./docker.nix { interactive = false; };
   sif = self.callPackage ./sif.nix { };
-  }
-)
+})
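Note: as the comment above says, lib.makeScope keeps this package set overridable. A rough sketch of consuming the scope and then overriding llama-cpp via overrideScope' (the relative path, the llamaVersion value and the useVulkan flag are placeholders taken from elsewhere in this diff):

    let
      llamaPackages = pkgs.callPackage ./scope.nix { llamaVersion = "0.0.0"; };
      withVulkan = llamaPackages.overrideScope' (final: prev: {
        llama-cpp = prev.llama-cpp.override { useVulkan = true; };
      });
    in
    withVulkan.llama-cpp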

View file

@@ -145,7 +145,9 @@
       # the same path you would with an overlay.
       legacyPackages = {
         llamaPackages = pkgs.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
-        llamaPackagesWindows = pkgs.pkgsCross.mingwW64.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
+        llamaPackagesWindows = pkgs.pkgsCross.mingwW64.callPackage .devops/nix/scope.nix {
+          inherit llamaVersion;
+        };
         llamaPackagesCuda = pkgsCuda.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
         llamaPackagesRocm = pkgsRocm.callPackage .devops/nix/scope.nix { inherit llamaVersion; };
       };
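Note: because these scopes are exposed under legacyPackages, a downstream flake can reach them by attribute path, as with an overlay. A hedged sketch, assuming this flake is consumed under the input name llama-cpp and system is the consumer's platform:

    inputs.llama-cpp.legacyPackages.${system}.llamaPackagesCuda.llama-cpp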