diff --git a/.editorconfig b/.editorconfig index 1a8840f9b..2e108a7df 100644 --- a/.editorconfig +++ b/.editorconfig @@ -21,9 +21,4 @@ indent_style = tab [prompts/*.txt] insert_final_newline = unset -[examples/server/public/*] -indent_size = 2 - -[examples/llama.swiftui/llama.swiftui.xcodeproj/*] -indent_style = tab diff --git a/.gitignore b/.gitignore index b5a055955..cfaf40397 100644 --- a/.gitignore +++ b/.gitignore @@ -87,19 +87,19 @@ ppl-*.txt qnt-*.txt perf-*.txt -# Examples +# core -examples/jeopardy/results.txt -examples/server/*.css.hpp -examples/server/*.html.hpp -examples/server/*.js.hpp -examples/server/*.mjs.hpp +core/jeopardy/results.txt +core/server/*.css.hpp +core/server/*.html.hpp +core/server/*.js.hpp +core/server/*.mjs.hpp !build_64.sh -!examples/*.bat -!examples/*/*.kts -!examples/*/*/*.kts -!examples/sycl/*.bat -!examples/sycl/*.sh +!core/*.bat +!core/*/*.kts +!core/*/*/*.kts +!core/sycl/*.bat +!core/sycl/*.sh # Python diff --git a/CMakeLists.txt b/CMakeLists.txt index 4c829518f..f5d231229 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -183,12 +183,12 @@ install(FILES "${CMAKE_CURRENT_BINARY_DIR}/llama.pc" DESTINATION lib/pkgconfig) # -# programs, examples +# programs, core # add_subdirectory(common) if (LLAMA_BUILD_EXAMPLES) - add_subdirectory(examples) + add_subdirectory(core) add_subdirectory(pocs) endif() diff --git a/Makefile b/Makefile index bd0ee5589..5058be94d 100644 --- a/Makefile +++ b/Makefile @@ -1065,7 +1065,7 @@ $(LIB_COMMON_S): \ clean: rm -vrf *.dot $(BUILD_TARGETS) rm -rvf src/*.o - rm -rvf examples/*.o + rm -rvf core/*.o rm -rvf common/*.o rm -rvf *.a rm -rvf *.dll @@ -1082,10 +1082,10 @@ clean: rm -rvf $(BUILD_TARGETS) rm -f vulkan-shaders-gen ggml/src/ggml-vulkan-shaders.hpp ggml/src/ggml-vulkan-shaders.cpp rm -rvf $(LEGACY_TARGETS_CLEAN) - find examples pocs -type f -name "*.o" -delete + find core pocs -type f -name "*.o" -delete # -# Examples +# core # # $< is the first prerequisite, i.e. the source file. @@ -1095,7 +1095,7 @@ clean: # Helper function that replaces .c, .cpp, and .cu file endings with .o: GET_OBJ_FILE = $(patsubst %.c,%.o,$(patsubst %.cpp,%.o,$(patsubst %.cu,%.o,$(1)))) -llama-cli: examples/main/main.cpp \ +llama-cli: core/main/main.cpp \ $(OBJ_ALL) $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out %.h $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @@ -1104,7 +1104,7 @@ llama-cli: examples/main/main.cpp \ @echo ifdef GGML_RPC -rpc-server: examples/rpc/rpc-server.cpp \ +rpc-server: core/rpc/rpc-server.cpp \ $(OBJ_GGML) $(CXX) $(CXXFLAGS) $^ -o $@ $(LDFLAGS) endif # GGML_RPC @@ -1142,7 +1142,7 @@ llama-q8dot: pocs/vdot/q8dot.cpp ggml/src/ggml.o \ # NOTE: We currently will always build the deprecation-warning `main` and `server` binaries to help users migrate. # Eventually we will want to remove these target from building all the time. -main: examples/deprecation-warning/deprecation-warning.cpp +main: core/deprecation-warning/deprecation-warning.cpp $(CXX) $(CXXFLAGS) -c $< -o $(call GET_OBJ_FILE, $<) $(CXX) $(CXXFLAGS) $(filter-out $<,$^) $(call GET_OBJ_FILE, $<) -o $@ $(LDFLAGS) @echo "NOTICE: The 'main' binary is deprecated. Please use 'llama-cli' instead." 
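The rest of this diff deletes `convert_hf_to_gguf.py` outright. For orientation only, here is a minimal, hypothetical sketch of how the removed script's `Model` registry was typically driven; the script's real `main()` is not part of this excerpt, so the argument handling, the `architectures` lookup, and the hard-coded `MOSTLY_F16` output type are assumptions rather than the actual CLI.

```python
# Hypothetical driver for the converter removed below -- a sketch, not the script's real main().
from pathlib import Path

import gguf
from convert_hf_to_gguf import Model  # the module this diff deletes

def convert(dir_model: Path, fname_out: Path) -> None:
    hparams = Model.load_hparams(dir_model)             # reads <dir_model>/config.json
    arch = hparams["architectures"][0]                  # e.g. "LlamaForCausalLM" (assumed config key)
    model_class = Model.from_model_architecture(arch)   # resolved via @Model.register(...)
    model = model_class(dir_model, gguf.LlamaFileType.MOSTLY_F16, fname_out)
    model.write()                                       # writes metadata + tensors to a GGUF file

convert(Path("models/My-HF-Model"), Path("models/my-hf-model-f16.gguf"))
```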
diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py deleted file mode 100755 index 4087187c1..000000000 --- a/convert_hf_to_gguf.py +++ /dev/null @@ -1,3689 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -from __future__ import annotations - -import logging -import argparse -import contextlib -import json -import os -import re -import sys -from enum import IntEnum -from pathlib import Path -from hashlib import sha256 -from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Literal, Sequence, TypeVar, cast - -import math -import numpy as np -import torch - -if TYPE_CHECKING: - from torch import Tensor - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) -import gguf - -logger = logging.getLogger("hf-to-gguf") - - -###### MODEL DEFINITIONS ###### - -class SentencePieceTokenTypes(IntEnum): - NORMAL = 1 - UNKNOWN = 2 - CONTROL = 3 - USER_DEFINED = 4 - UNUSED = 5 - BYTE = 6 - - -AnyModel = TypeVar("AnyModel", bound="type[Model]") - - -class Model: - _model_classes: dict[str, type[Model]] = {} - - dir_model: Path - ftype: gguf.LlamaFileType - fname_out: Path - is_big_endian: bool - endianess: gguf.GGUFEndian - use_temp_file: bool - lazy: bool - part_names: list[str] - is_safetensors: bool - hparams: dict[str, Any] - block_count: int - tensor_map: gguf.TensorNameMap - tensor_names: set[str] | None - gguf_writer: gguf.GGUFWriter - model_name: str | None - metadata_override: Path | None - dir_model_card: Path - - # subclasses should define this! - model_arch: gguf.MODEL_ARCH - - def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, is_big_endian: bool = False, - use_temp_file: bool = False, eager: bool = False, - metadata_override: Path | None = None, model_name: str | None = None, - split_max_tensors: int = 0, split_max_size: int = 0, dry_run: bool = False, small_first_shard: bool = False): - if type(self) is Model: - raise TypeError(f"{type(self).__name__!r} should not be directly instantiated") - - self.dir_model = dir_model - self.ftype = ftype - self.fname_out = fname_out - self.is_big_endian = is_big_endian - self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE - self.use_temp_file = use_temp_file - self.lazy = not eager - self.part_names = Model.get_model_part_names(self.dir_model, "model", ".safetensors") - self.is_safetensors = len(self.part_names) > 0 - if not self.is_safetensors: - self.part_names = Model.get_model_part_names(self.dir_model, "pytorch_model", ".bin") - self.hparams = Model.load_hparams(self.dir_model) - self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer", "num_layers"]) - self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count) - self.tensor_names = None - self.metadata_override = metadata_override - self.model_name = model_name - self.dir_model_card = dir_model # overridden in convert_lora_to_gguf.py - - # Apply heuristics to figure out typical tensor encoding based on first layer tensor encoding type - if self.ftype == gguf.LlamaFileType.GUESSED: - # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie. 
- _, first_tensor = next(self.get_tensors()) - if first_tensor.dtype == torch.float16: - logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})") - self.ftype = gguf.LlamaFileType.MOSTLY_F16 - else: - logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})") - self.ftype = gguf.LlamaFileType.MOSTLY_BF16 - - # Configure GGUF Writer - self.gguf_writer = gguf.GGUFWriter(path=None, arch=gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file, - split_max_tensors=split_max_tensors, split_max_size=split_max_size, dry_run=dry_run, small_first_shard=small_first_shard) - - @classmethod - def __init_subclass__(cls): - # can't use an abstract property, because overriding it without type errors - # would require using decorated functions instead of simply defining the property - if "model_arch" not in cls.__dict__: - raise TypeError(f"Missing property 'model_arch' for {cls.__name__!r}") - - def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any: - key = next((k for k in keys if k in self.hparams), None) - if key is not None: - return self.hparams[key] - if optional: - return None - raise KeyError(f"could not find any of: {keys}") - - def set_vocab(self): - self._set_vocab_gpt2() - - def get_tensors(self) -> Iterator[tuple[str, Tensor]]: - tensor_names_from_parts: set[str] = set() - - if len(self.part_names) > 1: - self.tensor_names = set() - index_name = "model.safetensors" if self.is_safetensors else "pytorch_model.bin" - index_name += ".index.json" - logger.info(f"gguf: loading model weight map from '{index_name}'") - with open(self.dir_model / index_name, "r", encoding="utf-8") as f: - index: dict[str, Any] = json.load(f) - weight_map = index.get("weight_map") - if weight_map is None or not isinstance(weight_map, dict): - raise ValueError(f"Can't load 'weight_map' from {index_name!r}") - self.tensor_names.update(weight_map.keys()) - else: - self.tensor_names = tensor_names_from_parts - - for part_name in self.part_names: - logger.info(f"gguf: loading model part '{part_name}'") - ctx: ContextManager[Any] - if self.is_safetensors: - from safetensors import safe_open - ctx = cast(ContextManager[Any], safe_open(self.dir_model / part_name, framework="pt", device="cpu")) - else: - ctx = contextlib.nullcontext(torch.load(str(self.dir_model / part_name), map_location="cpu", mmap=True, weights_only=True)) - - with ctx as model_part: - tensor_names_from_parts.update(model_part.keys()) - - for name in model_part.keys(): - if self.is_safetensors: - if self.lazy: - data = model_part.get_slice(name) - data = LazyTorchTensor.from_safetensors_slice(data) - else: - data = model_part.get_tensor(name) - else: - data = model_part[name] - if self.lazy: - data = LazyTorchTensor.from_eager(data) - yield name, data - - # only verify tensor name presence; it doesn't matter if they are not in the right files - if len(sym_diff := tensor_names_from_parts.symmetric_difference(self.tensor_names)) > 0: - raise ValueError(f"Mismatch between weight map and model parts for tensor names: {sym_diff}") - - def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str: - if key not in gguf.MODEL_TENSORS[self.model_arch]: - raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}") - name: str = gguf.TENSOR_NAMES[key] - if "{bid}" in name: - assert bid is not None - name = name.format(bid=bid) - return name + suffix - - def match_model_tensor_name(self, 
name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool: - if key not in gguf.MODEL_TENSORS[self.model_arch]: - return False - key_name: str = gguf.TENSOR_NAMES[key] - if "{bid}" in key_name: - if bid is None: - return False - key_name = key_name.format(bid=bid) - else: - if bid is not None: - return False - return name == (key_name + suffix) - - def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str: - new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes) - if new_name is None: - raise ValueError(f"Can not map tensor {name!r}") - return new_name - - def set_gguf_parameters(self): - self.gguf_writer.add_block_count(self.block_count) - - if (n_ctx := self.find_hparam(["max_position_embeddings", "n_ctx"], optional=True)) is not None: - self.gguf_writer.add_context_length(n_ctx) - logger.info(f"gguf: context length = {n_ctx}") - - n_embd = self.find_hparam(["hidden_size", "n_embd"]) - self.gguf_writer.add_embedding_length(n_embd) - logger.info(f"gguf: embedding length = {n_embd}") - - if (n_ff := self.find_hparam(["intermediate_size", "n_inner"], optional=True)) is not None: - self.gguf_writer.add_feed_forward_length(n_ff) - logger.info(f"gguf: feed forward length = {n_ff}") - - n_head = self.find_hparam(["num_attention_heads", "n_head"]) - self.gguf_writer.add_head_count(n_head) - logger.info(f"gguf: head count = {n_head}") - - if (n_head_kv := self.hparams.get("num_key_value_heads")) is not None: - self.gguf_writer.add_head_count_kv(n_head_kv) - logger.info(f"gguf: key-value head count = {n_head_kv}") - - if (rope_theta := self.hparams.get("rope_theta")) is not None: - self.gguf_writer.add_rope_freq_base(rope_theta) - logger.info(f"gguf: rope theta = {rope_theta}") - if (f_rms_eps := self.hparams.get("rms_norm_eps")) is not None: - self.gguf_writer.add_layer_norm_rms_eps(f_rms_eps) - logger.info(f"gguf: rms norm epsilon = {f_rms_eps}") - if (f_norm_eps := self.find_hparam(["layer_norm_eps", "layer_norm_epsilon", "norm_epsilon"], optional=True)) is not None: - self.gguf_writer.add_layer_norm_eps(f_norm_eps) - logger.info(f"gguf: layer norm epsilon = {f_norm_eps}") - if (n_experts := self.hparams.get("num_local_experts")) is not None: - self.gguf_writer.add_expert_count(n_experts) - logger.info(f"gguf: expert count = {n_experts}") - if (n_experts_used := self.hparams.get("num_experts_per_tok")) is not None: - self.gguf_writer.add_expert_used_count(n_experts_used) - logger.info(f"gguf: experts used count = {n_experts_used}") - - if (head_dim := self.hparams.get("head_dim")) is not None: - self.gguf_writer.add_key_length(head_dim) - self.gguf_writer.add_value_length(head_dim) - - self.gguf_writer.add_file_type(self.ftype) - logger.info(f"gguf: file type = {self.ftype}") - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - return [(self.map_tensor_name(name), data_torch)] - - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del name, new_name, bid, n_dims # unused - - return False - - def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del name, new_name, bid, n_dims # unused - - return False - - def prepare_tensors(self): - max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,") - - for name, data_torch in self.get_tensors(): - # we don't need these - if name.endswith((".attention.masked_bias", 
".attention.bias", ".rotary_emb.inv_freq")): - continue - - old_dtype = data_torch.dtype - - # convert any unsupported data types to float32 - if data_torch.dtype not in (torch.float16, torch.float32): - data_torch = data_torch.to(torch.float32) - - # use the first number-like part of the tensor name as the block id - bid = None - for part in name.split("."): - if part.isdecimal(): - bid = int(part) - break - - for new_name, data in ((n, d.squeeze().numpy()) for n, d in self.modify_tensors(data_torch, name, bid)): - data: np.ndarray # type hint - n_dims = len(data.shape) - data_dtype = data.dtype - data_qtype: gguf.GGMLQuantizationType | None = None - - # when both are True, f32 should win - extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims) - extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims) - - # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors - # Conditions should closely match those in llama_model_quantize_internal in llama.cpp - extra_f32 = any(cond for cond in ( - extra_f32, - n_dims == 1, - new_name.endswith("_norm.weight"), - )) - - # Some tensor types are always in float32 - extra_f32 = extra_f32 or any(self.match_model_tensor_name(new_name, key, bid) for key in ( - gguf.MODEL_TENSOR.FFN_GATE_INP, - gguf.MODEL_TENSOR.POS_EMBD, - gguf.MODEL_TENSOR.TOKEN_TYPES, - )) - - # if f16 desired, convert any float32 2-dim weight tensors to float16 - extra_f16 = any(cond for cond in ( - extra_f16, - (name.endswith(".weight") and n_dims >= 2), - )) - - if self.ftype != gguf.LlamaFileType.ALL_F32 and extra_f16 and not extra_f32: - if self.ftype == gguf.LlamaFileType.MOSTLY_BF16: - data = gguf.quantize_bf16(data) - assert data.dtype == np.int16 - data_qtype = gguf.GGMLQuantizationType.BF16 - - elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0 and gguf.can_quantize_to_q8_0(data): - data = gguf.quantize_q8_0(data) - assert data.dtype == np.uint8 - data_qtype = gguf.GGMLQuantizationType.Q8_0 - - else: # default to float16 for quantized tensors - if data_dtype != np.float16: - data = data.astype(np.float16) - data_qtype = gguf.GGMLQuantizationType.F16 - - if data_qtype is None: # by default, convert to float32 - if data_dtype != np.float32: - data = data.astype(np.float32) - data_qtype = gguf.GGMLQuantizationType.F32 - - shape = gguf.quant_shape_from_byte_shape(data.shape, data_qtype) if data.dtype == np.uint8 else data.shape - - # reverse shape to make it similar to the internal ggml dimension order - shape_str = f"{{{', '.join(str(n) for n in reversed(shape))}}}" - - # n_dims is implicit in the shape - logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}") - - self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype) - - def set_type(self): - self.gguf_writer.add_type(gguf.GGUFType.MODEL) - - def prepare_metadata(self, vocab_only: bool): - - total_params, shared_params, expert_params, expert_count = self.gguf_writer.get_total_parameter_count() - - self.metadata = gguf.Metadata.load(self.metadata_override, self.dir_model_card, self.model_name, total_params) - - # Fallback to model directory name if metadata name is still missing - if self.metadata.name is None: - self.metadata.name = self.dir_model.name - - # Generate parameter weight class (useful for leader boards) if not yet determined - if self.metadata.size_label is None and total_params > 0: - self.metadata.size_label = gguf.size_label(total_params, shared_params, expert_params, expert_count) - - # Extract the encoding 
scheme from the file type name. e.g. 'gguf.LlamaFileType.MOSTLY_Q8_0' --> 'Q8_0' - output_type: str = self.ftype.name.partition("_")[2] - - # Filename Output - if self.fname_out.is_dir(): - # Generate default filename based on model specification and available metadata - if not vocab_only: - fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, self.metadata.size_label, output_type, model_type="LoRA" if total_params < 0 else None) - else: - fname_default: str = gguf.naming_convention(self.metadata.name, self.metadata.basename, self.metadata.finetune, self.metadata.version, size_label=None, output_type=None, model_type="vocab") - - # Use the default filename - self.fname_out = self.fname_out / f"{fname_default}.gguf" - else: - # Output path is a custom defined templated filename - # Note: `not is_dir()` is used because `.is_file()` will not detect - # file template strings as it doesn't actually exist as a file - - # Process templated file name with the output ftype, useful with the "auto" ftype - self.fname_out = self.fname_out.parent / gguf.fill_templated_filename(self.fname_out.name, output_type) - - self.set_type() - - logger.info("Set meta model") - self.metadata.set_gguf_meta_model(self.gguf_writer) - - logger.info("Set model parameters") - self.set_gguf_parameters() - - logger.info("Set model tokenizer") - self.set_vocab() - - logger.info("Set model quantization version") - self.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION) - - def write(self): - self.prepare_tensors() - self.prepare_metadata(vocab_only=False) - self.gguf_writer.write_header_to_file(path=self.fname_out) - self.gguf_writer.write_kv_data_to_file() - self.gguf_writer.write_tensors_to_file(progress=True) - self.gguf_writer.close() - - def write_vocab(self): - if len(self.gguf_writer.tensors) != 1: - raise ValueError('Splitting the vocabulary is not supported') - - self.prepare_metadata(vocab_only=True) - self.gguf_writer.write_header_to_file(path=self.fname_out) - self.gguf_writer.write_kv_data_to_file() - self.gguf_writer.close() - - @staticmethod - def get_model_part_names(dir_model: Path, prefix: str, suffix: str) -> list[str]: - part_names: list[str] = [] - for filename in os.listdir(dir_model): - if filename.startswith(prefix) and filename.endswith(suffix): - part_names.append(filename) - - part_names.sort() - - return part_names - - @staticmethod - def load_hparams(dir_model: Path): - with open(dir_model / "config.json", "r", encoding="utf-8") as f: - return json.load(f) - - @classmethod - def register(cls, *names: str) -> Callable[[AnyModel], AnyModel]: - assert names - - def func(modelcls: AnyModel) -> AnyModel: - for name in names: - cls._model_classes[name] = modelcls - return modelcls - return func - - @classmethod - def from_model_architecture(cls, arch: str) -> type[Model]: - try: - return cls._model_classes[arch] - except KeyError: - raise NotImplementedError(f'Architecture {arch!r} not supported!') from None - - def does_token_look_special(self, token: str | bytes) -> bool: - if isinstance(token, (bytes, bytearray)): - token_text = token.decode(encoding="utf-8") - elif isinstance(token, memoryview): - token_text = token.tobytes().decode(encoding="utf-8") - else: - token_text = token - - # Some models mark some added tokens which ought to be control tokens as not special. - # (e.g. 
command-r, command-r-plus, deepseek-coder, gemma{,-2}) - seems_special = token_text in ( - "", # deepseek-coder - "", "<2mass>", "[@BOS@]", # gemma{,-2} - ) - - seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>")) - seems_special = seems_special or (token_text.startswith("<|") and token_text.endswith("|>")) # deepseek-coder - - # TODO: should these be marked as UNUSED instead? (maybe not) - seems_special = seems_special or (token_text.startswith("")) # gemma{,-2} - - return seems_special - - # used for GPT-2 BPE and WordPiece vocabs - def get_vocab_base(self) -> tuple[list[str], list[int], str]: - tokens: list[str] = [] - toktypes: list[int] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(self.dir_model) - vocab_size = self.hparams.get("vocab_size", len(tokenizer.vocab)) - assert max(tokenizer.vocab.values()) < vocab_size - - tokpre = self.get_vocab_base_pre(tokenizer) - - reverse_vocab = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()} - added_vocab = tokenizer.get_added_vocab() - - for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.UNUSED) - else: - token: str = reverse_vocab[i] - if token in added_vocab: - if tokenizer.added_tokens_decoder[i].special or self.does_token_look_special(token): - toktypes.append(gguf.TokenType.CONTROL) - else: - token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces - toktypes.append(gguf.TokenType.USER_DEFINED) - else: - toktypes.append(gguf.TokenType.NORMAL) - tokens.append(token) - - return tokens, toktypes, tokpre - - # NOTE: this function is generated by convert_hf_to_gguf_update.py - # do not modify it manually! - # ref: https://github.com/ggerganov/llama.cpp/pull/6920 - # Marker: Start get_vocab_base_pre - def get_vocab_base_pre(self, tokenizer) -> str: - # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that - # is specific for the BPE pre-tokenizer used by the model - # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can - # use in llama.cpp to implement the same pre-tokenizer - - chktxt = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶\u200d🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````""""......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? We\'Ve a\'lL' - - chktok = tokenizer.encode(chktxt) - chkhsh = sha256(str(chktok).encode()).hexdigest() - - logger.debug(f"chktok: {chktok}") - logger.debug(f"chkhsh: {chkhsh}") - - res = None - - # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script - # or pull the latest version of the model from Huggingface - # don't edit the hashes manually! 
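The `chkhsh` comparisons that follow map this fingerprint to a known `tokenizer.ggml.pre` value. As the comments above note, new entries are generated by `convert_hf_to_gguf_update.py`; still, a minimal sketch of computing the same fingerprint by hand, assuming `transformers` is available and substituting the `chktxt` test string defined above (abbreviated here), looks like this:

```python
# Sketch: reproduce the pre-tokenizer fingerprint used by get_vocab_base_pre().
from hashlib import sha256

from transformers import AutoTokenizer

chktxt = "..."  # substitute the exact multi-script test string defined above
tokenizer = AutoTokenizer.from_pretrained("path/to/local/hf-model")  # placeholder path

chktok = tokenizer.encode(chktxt)                  # token ids for the test string
chkhsh = sha256(str(chktok).encode()).hexdigest()  # digest of that id list
print(chkhsh)  # compare against the table below, or add a new entry for the model
```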
- if chkhsh == "0ef9807a4087ebef797fc749390439009c3b9eda9ad1a097abbe738f486c01e5": - # ref: https://huggingface.co/meta-llama/Meta-Llama-3-8B - res = "llama-bpe" - if chkhsh == "049ecf7629871e3041641907f3de7c733e4dbfdc736f57d882ba0b0845599754": - # ref: https://huggingface.co/deepseek-ai/deepseek-llm-7b-base - res = "deepseek-llm" - if chkhsh == "347715f544604f9118bb75ed199f68779f423cabb20db6de6f31b908d04d7821": - # ref: https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base - res = "deepseek-coder" - if chkhsh == "8aeee3860c56296a157a1fe2fad249ec40aa59b1bb5709f4ade11c4e6fe652ed": - # ref: https://huggingface.co/tiiuae/falcon-7b - res = "falcon" - if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f": - # ref: https://huggingface.co/BAAI/bge-small-en-v1.5 - res = "bert-bge" - if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166": - # ref: https://huggingface.co/mosaicml/mpt-7b - res = "mpt" - if chkhsh == "35d91631860c815f952d711435f48d356ebac988362536bed955d43bfa436e34": - # ref: https://huggingface.co/bigcode/starcoder2-3b - res = "starcoder" - if chkhsh == "3ce83efda5659b07b1ad37ca97ca5797ea4285d9b9ab0dc679e4a720c9da7454": - # ref: https://huggingface.co/openai-community/gpt2 - res = "gpt-2" - if chkhsh == "32d85c31273f8019248f2559fed492d929ea28b17e51d81d3bb36fff23ca72b3": - # ref: https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b - res = "stablelm2" - if chkhsh == "6221ad2852e85ce96f791f476e0b390cf9b474c9e3d1362f53a24a06dc8220ff": - # ref: https://huggingface.co/smallcloudai/Refact-1_6-base - res = "refact" - if chkhsh == "9c2227e4dd922002fb81bde4fc02b0483ca4f12911410dee2255e4987644e3f8": - # ref: https://huggingface.co/CohereForAI/c4ai-command-r-v01 - res = "command-r" - if chkhsh == "e636dc30a262dcc0d8c323492e32ae2b70728f4df7dfe9737d9f920a282b8aea": - # ref: https://huggingface.co/Qwen/Qwen1.5-7B - res = "qwen2" - if chkhsh == "b6dc8df998e1cfbdc4eac8243701a65afe638679230920b50d6f17d81c098166": - # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf - res = "olmo" - if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e": - # ref: https://huggingface.co/databricks/dbrx-base - res = "dbrx" - if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f": - # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en - res = "jina-v2-en" - if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643": - # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es - res = "jina-v2-es" - if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6": - # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de - res = "jina-v2-de" - if chkhsh == "c136ed14d01c2745d4f60a9596ae66800e2b61fa45643e72436041855ad4089d": - # ref: https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct - res = "smaug-bpe" - if chkhsh == "c7ea5862a53e4272c035c8238367063e2b270d51faa48c0f09e9d5b54746c360": - # ref: https://huggingface.co/LumiOpen/Poro-34B-chat - res = "poro-chat" - if chkhsh == "7967bfa498ade6b757b064f31e964dddbb80f8f9a4d68d4ba7998fcf281c531a": - # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-code - res = "jina-v2-code" - if chkhsh == "b6e8e1518dc4305be2fe39c313ed643381c4da5db34a98f6a04c093f8afbe99b": - # ref: https://huggingface.co/THUDM/glm-4-9b-chat - res = "chatglm-bpe" - if chkhsh == "7fc505bd3104ca1083b150b17d088b59534ede9bde81f0dd2090967d7fe52cee": - # ref: https://huggingface.co/LumiOpen/Viking-7B - res = "viking" - if chkhsh == 
"b53802fb28e26d645c3a310b34bfe07da813026ec7c7716883404d5e0f8b1901": - # ref: https://huggingface.co/core42/jais-13b - res = "jais" - if chkhsh == "7b3e7548e4308f52a76e8229e4e6cc831195d0d1df43aed21ac6c93da05fec5f": - # ref: https://huggingface.co/WisdomShell/CodeShell-7B - res = "codeshell" - if chkhsh == "63b97e4253352e6f357cc59ea5b583e3a680eaeaf2632188c2b952de2588485e": - # ref: https://huggingface.co/mistralai/Mistral-Nemo-Base-2407 - res = "tekken" - if chkhsh == "855059429035d75a914d1eda9f10a876752e281a054a7a3d421ef0533e5b6249": - # ref: https://huggingface.co/HuggingFaceTB/SmolLM-135M - res = "smollm" - - if res is None: - logger.warning("\n") - logger.warning("**************************************************************************************") - logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!") - logger.warning("** There are 2 possible reasons for this:") - logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet") - logger.warning("** - the pre-tokenization config has changed upstream") - logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.") - logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920") - logger.warning("**") - logger.warning(f"** chkhsh: {chkhsh}") - logger.warning("**************************************************************************************") - logger.warning("\n") - raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()") - - logger.debug(f"tokenizer.ggml.pre: {repr(res)}") - logger.debug(f"chkhsh: {chkhsh}") - - return res - # Marker: End get_vocab_base_pre - - def _set_vocab_gpt2(self) -> None: - tokens, toktypes, tokpre = self.get_vocab_base() - self.gguf_writer.add_tokenizer_model("gpt2") - self.gguf_writer.add_tokenizer_pre(tokpre) - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=True) - special_vocab.add_to_gguf(self.gguf_writer) - - def _set_vocab_qwen(self): - dir_model = self.dir_model - hparams = self.hparams - tokens: list[str] = [] - toktypes: list[int] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True) - vocab_size = hparams["vocab_size"] - assert max(tokenizer.get_vocab().values()) < vocab_size - - tokpre = self.get_vocab_base_pre(tokenizer) - - merges = [] - vocab = {} - mergeable_ranks = tokenizer.mergeable_ranks - for token, rank in mergeable_ranks.items(): - vocab[QwenModel.token_bytes_to_string(token)] = rank - if len(token) == 1: - continue - merged = QwenModel.bpe(mergeable_ranks, token, max_rank=rank) - assert len(merged) == 2 - merges.append(' '.join(map(QwenModel.token_bytes_to_string, merged))) - - # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined - added_vocab = tokenizer.special_tokens - reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()} - - for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.UNUSED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.CONTROL) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - - self.gguf_writer.add_tokenizer_model("gpt2") - self.gguf_writer.add_tokenizer_pre(tokpre) - self.gguf_writer.add_token_list(tokens) - 
self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(dir_model, load_merges=False) - special_vocab.merges = merges - # only add special tokens when they were not already loaded from config.json - if len(special_vocab.special_token_ids) == 0: - special_vocab._set_special_token("bos", tokenizer.special_tokens["<|endoftext|>"]) - special_vocab._set_special_token("eos", tokenizer.special_tokens["<|endoftext|>"]) - # this one is usually not in config.json anyway - special_vocab._set_special_token("unk", tokenizer.special_tokens["<|endoftext|>"]) - special_vocab.add_to_gguf(self.gguf_writer) - - def _set_vocab_sentencepiece(self, add_to_gguf=True): - tokens, scores, toktypes = self._create_vocab_sentencepiece() - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def _create_vocab_sentencepiece(self): - from sentencepiece import SentencePieceProcessor - - tokenizer_path = self.dir_model / 'tokenizer.model' - - if not tokenizer_path.is_file(): - raise FileNotFoundError(f"File not found: {tokenizer_path}") - - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] - scores: list[float] = [-10000.0] * vocab_size - toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - - for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE - - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype - - added_tokens_file = self.dir_model / 'added_tokens.json' - if added_tokens_file.is_file(): - with open(added_tokens_file, "r", encoding="utf-8") as f: - added_tokens_json = json.load(f) - for key in added_tokens_json: - token_id = added_tokens_json[key] - if token_id >= vocab_size: - logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}') - continue - - tokens[token_id] = key.encode("utf-8") - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {}) - for token_id, token_data in added_tokens_decoder.items(): - token_id = int(token_id) - token: str = token_data["content"] - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token.encode("utf-8"): - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token!r}') - if token_data.get("special") or self.does_token_look_special(token): - toktypes[token_id] = 
SentencePieceTokenTypes.CONTROL - else: - token = token.replace(b"\xe2\x96\x81".decode("utf-8"), " ") # pre-normalize user-defined spaces - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - - scores[token_id] = -1000.0 - tokens[token_id] = token.encode("utf-8") - - if vocab_size > len(tokens): - pad_count = vocab_size - len(tokens) - logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]") - for i in range(1, pad_count + 1): - tokens.append(bytes(f"[PAD{i}]", encoding="utf-8")) - scores.append(-1000.0) - toktypes.append(SentencePieceTokenTypes.UNUSED) - - return tokens, scores, toktypes - - def _set_vocab_llama_hf(self): - vocab = gguf.LlamaHfVocab(self.dir_model) - tokens = [] - scores = [] - toktypes = [] - - for text, score, toktype in vocab.all_tokens(): - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - - assert len(tokens) == vocab.vocab_size - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def _set_vocab_builtin(self, model_name: Literal["gpt-neox", "llama-spm"], vocab_size: int): - tokenizer_path = Path(sys.path[0]) / "models" / f"ggml-vocab-{model_name}.gguf" - logger.warning(f"Using tokenizer from '{os.path.relpath(tokenizer_path, os.getcwd())}'") - vocab_reader = gguf.GGUFReader(tokenizer_path, "r") - - default_pre = "mpt" if model_name == "gpt-neox" else "default" - - field = vocab_reader.get_field(gguf.Keys.Tokenizer.MODEL) - assert field # tokenizer model - self.gguf_writer.add_tokenizer_model(bytes(field.parts[-1]).decode("utf-8")) - - field = vocab_reader.get_field(gguf.Keys.Tokenizer.PRE) - self.gguf_writer.add_tokenizer_pre(bytes(field.parts[-1]).decode("utf-8") if field else default_pre) - - field = vocab_reader.get_field(gguf.Keys.Tokenizer.LIST) - assert field # token list - self.gguf_writer.add_token_list([bytes(field.parts[i]) for i in field.data][:vocab_size]) - - if model_name == "llama-spm": - field = vocab_reader.get_field(gguf.Keys.Tokenizer.SCORES) - assert field # token scores - self.gguf_writer.add_token_scores([field.parts[i].tolist()[0] for i in field.data][:vocab_size]) - - field = vocab_reader.get_field(gguf.Keys.Tokenizer.TOKEN_TYPE) - assert field # token types - self.gguf_writer.add_token_types([field.parts[i].tolist()[0] for i in field.data][:vocab_size]) - - if model_name != "llama-spm": - field = vocab_reader.get_field(gguf.Keys.Tokenizer.MERGES) - assert field # token merges - self.gguf_writer.add_token_merges([bytes(field.parts[i]) for i in field.data]) - - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.BOS_ID)) is not None: - self.gguf_writer.add_bos_token_id(field.parts[-1].tolist()[0]) - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.EOS_ID)) is not None: - self.gguf_writer.add_eos_token_id(field.parts[-1].tolist()[0]) - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.UNK_ID)) is not None: - self.gguf_writer.add_unk_token_id(field.parts[-1].tolist()[0]) - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.PAD_ID)) is not None: - self.gguf_writer.add_pad_token_id(field.parts[-1].tolist()[0]) - if (field := vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_BOS)) is not None: - self.gguf_writer.add_add_bos_token(field.parts[-1].tolist()[0]) - if (field := 
vocab_reader.get_field(gguf.Keys.Tokenizer.ADD_EOS)) is not None: - self.gguf_writer.add_add_eos_token(field.parts[-1].tolist()[0]) - - -@Model.register("GPTNeoXForCausalLM") -class GPTNeoXModel(Model): - model_arch = gguf.MODEL_ARCH.GPTNEOX - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count( - int(self.hparams["rotary_pct"] * (self.hparams["hidden_size"] // self.hparams["num_attention_heads"])), - ) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_parallel_residual(self.hparams.get("use_parallel_residual", True)) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) - n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - - tensors: list[tuple[str, Tensor]] = [] - - if re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.weight", name): - # Map bloom-style qkv_linear to gpt-style qkv_linear - # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa - # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa - qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed)) - data_torch = torch.cat( - ( - qkv_weights[:, 0, :, :].reshape((-1, n_embed)), - qkv_weights[:, 1, :, :].reshape((-1, n_embed)), - qkv_weights[:, 2, :, :].reshape((-1, n_embed)), - ), - dim=0, - ) - logger.info("re-format attention.linear_qkv.weight") - elif re.match(r"gpt_neox\.layers\.\d+\.attention\.query_key_value\.bias", name): - qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head)) - data_torch = torch.cat( - ( - qkv_bias[:, 0, :].reshape((n_embed,)), - qkv_bias[:, 1, :].reshape((n_embed,)), - qkv_bias[:, 2, :].reshape((n_embed,)), - ), - dim=0, - ) - logger.info("re-format attention.linear_qkv.bias") - - tensors.append((self.map_tensor_name(name), data_torch)) - - return tensors - - -@Model.register("BloomForCausalLM") -class BloomModel(Model): - model_arch = gguf.MODEL_ARCH.BLOOM - - def set_gguf_parameters(self): - n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) - self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed)) - self.gguf_writer.add_embedding_length(n_embed) - self.gguf_writer.add_feed_forward_length(4 * n_embed) - self.gguf_writer.add_block_count(self.hparams["n_layer"]) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) - n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - - name = re.sub(r'transformer\.', '', 
name) - - tensors: list[tuple[str, Tensor]] = [] - - if re.match(r"h\.\d+\.self_attention\.query_key_value\.weight", name): - # Map bloom-style qkv_linear to gpt-style qkv_linear - # bloom: https://github.com/huggingface/transformers/blob/main/src/transformers/models/bloom/modeling_bloom.py#L238-L252 # noqa - # gpt-2: https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt2/modeling_gpt2.py#L312 # noqa - qkv_weights = data_torch.reshape((n_head, 3, n_embed // n_head, n_embed)) - data_torch = torch.cat( - ( - qkv_weights[:, 0, :, :].reshape((-1, n_embed)), - qkv_weights[:, 1, :, :].reshape((-1, n_embed)), - qkv_weights[:, 2, :, :].reshape((-1, n_embed)), - ), - dim=0, - ) - logger.info("re-format attention.linear_qkv.weight") - elif re.match(r"h\.\d+\.self_attention\.query_key_value\.bias", name): - qkv_bias = data_torch.reshape((n_head, 3, n_embed // n_head)) - data_torch = torch.cat( - ( - qkv_bias[:, 0, :].reshape((n_embed,)), - qkv_bias[:, 1, :].reshape((n_embed,)), - qkv_bias[:, 2, :].reshape((n_embed,)), - ), - dim=0, - ) - logger.info("re-format attention.linear_qkv.bias") - - tensors.append((self.map_tensor_name(name), data_torch)) - - if name == "word_embeddings.weight": - assert self.tensor_names is not None - - # TODO: tie them at runtime, don't duplicate in the model file - if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")): - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - - return tensors - - -@Model.register("MPTForCausalLM") -class MPTModel(Model): - model_arch = gguf.MODEL_ARCH.MPT - - def set_vocab(self): - try: - self._set_vocab_gpt2() - except Exception: - # Fallback for SEA-LION model - self._set_vocab_sentencepiece() - self.gguf_writer.add_add_bos_token(False) - self.gguf_writer.add_pad_token_id(3) - self.gguf_writer.add_eos_token_id(1) - self.gguf_writer.add_unk_token_id(0) - - def set_gguf_parameters(self): - block_count = self.hparams["n_layers"] - self.gguf_writer.add_context_length(self.hparams["max_seq_len"]) - self.gguf_writer.add_embedding_length(self.hparams["d_model"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["d_model"]) - self.gguf_writer.add_head_count(self.hparams["n_heads"]) - if kv_n_heads := self.hparams["attn_config"].get("kv_n_heads"): - self.gguf_writer.add_head_count_kv(kv_n_heads) - self.gguf_writer.add_layer_norm_eps(1e-5) - if self.hparams["attn_config"]["clip_qkv"] is not None: - self.gguf_writer.add_clamp_kqv(self.hparams["attn_config"]["clip_qkv"]) - if self.hparams["attn_config"]["alibi"]: - self.gguf_writer.add_max_alibi_bias(self.hparams["attn_config"]["alibi_bias_max"]) - else: - self.gguf_writer.add_max_alibi_bias(0.0) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - if "scales" in name: - new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias", ".scales")) - new_name = new_name.replace("scales", "act.scales") - else: - new_name = self.map_tensor_name(name, try_suffixes=(".weight", ".bias")) - - return [(new_name, data_torch)] - - -@Model.register("OrionForCausalLM") -class OrionModel(Model): - model_arch = gguf.MODEL_ARCH.ORION - - def set_vocab(self): - self._set_vocab_sentencepiece() - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) 
- - ctx_length = 0 - if "max_sequence_length" in self.hparams: - ctx_length = self.hparams["max_sequence_length"] - elif "max_position_embeddings" in self.hparams: - ctx_length = self.hparams["max_position_embeddings"] - elif "model_max_length" in self.hparams: - ctx_length = self.hparams["model_max_length"] - else: - raise ValueError("gguf: can not find ctx length parameter.") - - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_tensor_data_layout("Meta AI original pth") - self.gguf_writer.add_context_length(ctx_length) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_head_count(head_count) - self.gguf_writer.add_head_count_kv(head_count_kv) - # note: config provides rms norm but it is actually layer norm - # ref: https://huggingface.co/OrionStarAI/Orion-14B-Chat/blob/276a17221ce42beb45f66fac657a41540e71f4f5/modeling_orion.py#L570-L571 - self.gguf_writer.add_layer_norm_eps(self.hparams["rms_norm_eps"]) - - -@Model.register("BaichuanForCausalLM", "BaiChuanForCausalLM") -class BaichuanModel(Model): - model_arch = gguf.MODEL_ARCH.BAICHUAN - - def set_vocab(self): - self._set_vocab_sentencepiece() - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - ctx_length = 0 - if "max_sequence_length" in self.hparams: - ctx_length = self.hparams["max_sequence_length"] - elif "max_position_embeddings" in self.hparams: - ctx_length = self.hparams["max_position_embeddings"] - elif "model_max_length" in self.hparams: - ctx_length = self.hparams["model_max_length"] - else: - raise ValueError("gguf: can not find ctx length parameter.") - - self.gguf_writer.add_tensor_data_layout("Meta AI original pth") - self.gguf_writer.add_context_length(ctx_length) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(head_count) - self.gguf_writer.add_head_count_kv(head_count_kv) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "linear": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - tensors: list[tuple[str, Tensor]] = [] - - if bid is not None and name == f"model.layers.{bid}.self_attn.W_pack.weight": - logger.info(f"Unpacking and permuting layer {bid}") - tensors = [ - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), - self._reverse_hf_permute_part(data_torch, 0, head_count, head_count)), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), - self._reverse_hf_permute_part(data_torch, 1, head_count, head_count_kv)), - 
(self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), - self._reverse_hf_part(data_torch, 2)), - ] - else: - tensors = [(self.map_tensor_name(name), data_torch)] - - return tensors - - def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: - if n_kv_head is not None and n_head != n_kv_head: - n_head //= n_kv_head - - return ( - weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape) - ) - - def _reverse_hf_permute_part( - self, weights: Tensor, n_part: int, n_head: int, n_head_kv: int | None = None, - ) -> Tensor: - r = weights.shape[0] // 3 - return self._reverse_hf_permute(weights[r * n_part:r * n_part + r, ...], n_head, n_head_kv) - - def _reverse_hf_part(self, weights: Tensor, n_part: int) -> Tensor: - r = weights.shape[0] // 3 - return weights[r * n_part:r * n_part + r, ...] - - -@Model.register("XverseForCausalLM") -class XverseModel(Model): - model_arch = gguf.MODEL_ARCH.XVERSE - - def set_vocab(self): - assert (self.dir_model / "tokenizer.json").is_file() - dir_model = self.dir_model - hparams = self.hparams - - tokens: list[bytes] = [] - toktypes: list[int] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(dir_model) - vocab_size = hparams.get("vocab_size", len(tokenizer.vocab)) - # Since we are checking the maximum index, we need to ensure it's strictly less than vocab_size, - # because vocab_size is the count of items, and indexes start at 0. - max_vocab_index = max(tokenizer.get_vocab().values()) - if max_vocab_index >= vocab_size: - raise ValueError("Vocabulary size exceeds expected maximum size.") - - reverse_vocab: dict[int, str] = {id_: encoded_tok for encoded_tok, id_ in tokenizer.vocab.items()} - added_vocab = tokenizer.get_added_vocab() - - for token_id in range(vocab_size): - token_text = reverse_vocab[token_id].encode('utf-8') - # replace "\x00" to string with length > 0 - if token_text == b"\x00": - toktype = gguf.TokenType.BYTE # special - token_text = f"<{token_text}>".encode('utf-8') - elif re.fullmatch(br"<0x[0-9A-Fa-f]{2}>", token_text): - toktype = gguf.TokenType.BYTE # special - elif reverse_vocab[token_id] in added_vocab: - if tokenizer.added_tokens_decoder[token_id].special: - toktype = gguf.TokenType.CONTROL - else: - toktype = gguf.TokenType.USER_DEFINED - else: - toktype = gguf.TokenType.NORMAL - - tokens.append(token_text) - toktypes.append(toktype) - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - ctx_length = 0 - if "max_sequence_length" in self.hparams: - ctx_length = self.hparams["max_sequence_length"] - elif "max_position_embeddings" in self.hparams: - ctx_length = self.hparams["max_position_embeddings"] - elif "model_max_length" in self.hparams: - ctx_length = self.hparams["model_max_length"] - else: - raise ValueError("gguf: can not find ctx length parameter.") - - self.gguf_writer.add_tensor_data_layout("Meta AI original pth") - self.gguf_writer.add_context_length(ctx_length) - 
self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(head_count) - self.gguf_writer.add_head_count_kv(head_count_kv) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "linear": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - head_count = self.hparams["num_attention_heads"] - head_count_kv = self.hparams.get("num_key_value_heads", head_count) - - # HF models permute some of the tensors, so we need to undo that - if name.endswith("q_proj.weight"): - data_torch = self._reverse_hf_permute(data_torch, head_count, head_count) - if name.endswith("k_proj.weight"): - data_torch = self._reverse_hf_permute(data_torch, head_count, head_count_kv) - - return [(self.map_tensor_name(name), data_torch)] - - def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: - if n_kv_head is not None and n_head != n_kv_head: - n_head //= n_kv_head - - return ( - weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape) - ) - - -@Model.register("FalconForCausalLM", "RWForCausalLM") -class FalconModel(Model): - model_arch = gguf.MODEL_ARCH.FALCON - - def set_gguf_parameters(self): - block_count = self.hparams.get("num_hidden_layers") - if block_count is None: - block_count = self.hparams["n_layer"] # old name - - n_head = self.hparams.get("num_attention_heads") - if n_head is None: - n_head = self.hparams["n_head"] # old name - - n_head_kv = self.hparams.get("num_kv_heads") - if n_head_kv is None: - n_head_kv = self.hparams.get("n_head_kv", 1) # old name - - self.gguf_writer.add_context_length(2048) # not in config.json - self.gguf_writer.add_tensor_data_layout("jploski") # qkv tensor transform - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head_kv) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # QKV tensor transform - # The original query_key_value tensor contains n_head_kv "kv groups", - # each consisting of n_head/n_head_kv query weights followed by one key - # and one value weight (shared by all query heads in the kv group). - # This layout makes it a big pain to work with in GGML. - # So we rearrange them here,, so that we have n_head query weights - # followed by n_head_kv key weights followed by n_head_kv value weights, - # in contiguous fashion. 
- # ref: https://github.com/jploski/ggml/blob/falcon40b/examples/falcon/convert-hf-to-ggml.py - - if "query_key_value" in name: - n_head = self.find_hparam(["num_attention_heads", "n_head"]) - n_head_kv = self.find_hparam(["num_kv_heads", "n_head_kv"], optional=True) or 1 - head_dim = self.hparams["hidden_size"] // n_head - - qkv = data_torch.view(n_head_kv, n_head // n_head_kv + 2, head_dim, head_dim * n_head) - q = qkv[:, :-2].reshape(n_head * head_dim, head_dim * n_head) - k = qkv[:, [-2]].reshape(n_head_kv * head_dim, head_dim * n_head) - v = qkv[:, [-1]].reshape(n_head_kv * head_dim, head_dim * n_head) - data_torch = torch.cat((q, k, v)).reshape_as(data_torch) - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("GPTBigCodeForCausalLM") -class StarCoderModel(Model): - model_arch = gguf.MODEL_ARCH.STARCODER - - def set_gguf_parameters(self): - block_count = self.hparams["n_layer"] - - self.gguf_writer.add_context_length(self.hparams["n_positions"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_head_count_kv(1) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - -@Model.register("GPTRefactForCausalLM") -class RefactModel(Model): - model_arch = gguf.MODEL_ARCH.REFACT - - def set_vocab(self): - super().set_vocab() - - # TODO: how to determine special FIM tokens automatically? - special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False, - special_token_types = ['prefix', 'suffix', 'middle', 'eot']) - special_vocab._set_special_token("prefix", 1) - special_vocab._set_special_token("suffix", 3) - special_vocab._set_special_token("middle", 2) - special_vocab.chat_template = None # do not add it twice - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - hidden_dim = self.hparams["n_embd"] - inner_dim = 4 * hidden_dim - hidden_dim = int(2 * inner_dim / 3) - multiple_of = 256 - ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) - - block_count = self.hparams["n_layer"] - - # refact uses Alibi. So this is from config.json which might be used by training. 
- self.gguf_writer.add_context_length(self.hparams["n_positions"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - - self.gguf_writer.add_feed_forward_length(ff_dim) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_head_count_kv(1) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - hidden_dim = self.hparams["n_embd"] - inner_dim = 4 * hidden_dim - hidden_dim = int(2 * inner_dim / 3) - multiple_of = 256 - ff_dim = multiple_of * ((hidden_dim + multiple_of - 1) // multiple_of) - n_head = self.hparams["n_head"] - n_head_kv = 1 - head_dim = self.hparams["n_embd"] // n_head - - tensors: list[tuple[str, Tensor]] = [] - - if bid is not None: - if name == f"transformer.h.{bid}.attn.kv.weight": - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), data_torch[:n_head_kv * head_dim])) - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), data_torch[n_head_kv * head_dim:])) - elif name == f"transformer.h.{bid}.attn.q.weight": - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), data_torch)) - elif name == f"transformer.h.{bid}.mlp.gate_up_proj.weight": - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim])) - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:])) - - if len(tensors) == 0: - tensors.append((self.map_tensor_name(name), data_torch)) - - return tensors - - -@Model.register("StableLmForCausalLM", "StableLMEpochForCausalLM", "LlavaStableLMEpochForCausalLM") -class StableLMModel(Model): - model_arch = gguf.MODEL_ARCH.STABLELM - - def set_vocab(self): - if (self.dir_model / "tokenizer.json").is_file(): - self._set_vocab_gpt2() - else: - # StableLM 2 1.6B used to have a vocab in a similar format to Qwen's vocab - self._set_vocab_qwen() - - def set_gguf_parameters(self): - hparams = self.hparams - block_count = hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) - rotary_factor = self.find_hparam(["partial_rotary_factor", "rope_pct"]) - self.gguf_writer.add_rope_dimension_count(int(rotary_factor * (hparams["hidden_size"] // hparams["num_attention_heads"]))) - self.gguf_writer.add_head_count(hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"]) - self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True) - self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"])) - self.gguf_writer.add_file_type(self.ftype) - - _q_norms: list[dict[str, Tensor]] | None = None - _k_norms: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams["num_key_value_heads"] - - if name.find("q_layernorm.norms") != -1: - assert bid is not None - - if self._q_norms is None: - self._q_norms = [{} for _ in range(self.block_count)] - - self._q_norms[bid][name] = data_torch - - if 
len(self._q_norms[bid]) >= n_head: - return self._stack_qk_norm(bid, n_head, self._q_norms[bid], "q_layernorm") - else: - return [] - - if name.find("k_layernorm.norms") != -1: - assert bid is not None - - if self._k_norms is None: - self._k_norms = [{} for _ in range(self.block_count)] - - self._k_norms[bid][name] = data_torch - - if len(self._k_norms[bid]) >= n_kv_head: - return self._stack_qk_norm(bid, n_kv_head, self._k_norms[bid], "k_layernorm") - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def _stack_qk_norm(self, bid: int, n_head: int, norms: dict[str, Tensor], layer_name: str = "q_layernorm"): - datas: list[Tensor] = [] - # extract the norms in order - for xid in range(n_head): - ename = f"model.layers.{bid}.self_attn.{layer_name}.norms.{xid}.weight" - datas.append(norms[ename]) - del norms[ename] - data_torch = torch.stack(datas, dim=0) - - merged_name = f"model.layers.{bid}.self_attn.{layer_name}.weight" - new_name = self.map_tensor_name(merged_name) - - return [(new_name, data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._q_norms is not None or self._k_norms is not None: - # flatten two `list[dict[str, Tensor]]` into a single `list[str]` - norms = ( - [k for d in self._q_norms for k in d.keys()] if self._q_norms is not None else [] - ) + ( - [k for d in self._k_norms for k in d.keys()] if self._k_norms is not None else [] - ) - if len(norms) > 0: - raise ValueError(f"Unprocessed norms: {norms}") - - -@Model.register("LlamaForCausalLM", "MistralForCausalLM", "MixtralForCausalLM") -class LlamaModel(Model): - model_arch = gguf.MODEL_ARCH.LLAMA - - def set_vocab(self): - try: - self._set_vocab_sentencepiece() - except FileNotFoundError: - try: - self._set_vocab_llama_hf() - except (FileNotFoundError, TypeError): - # Llama 3 - self._set_vocab_gpt2() - - # Apply to CodeLlama only (and ignore for Llama 3 with a vocab size of 128256) - if self.hparams.get("vocab_size", 32000) == 32016: - special_vocab = gguf.SpecialVocab( - self.dir_model, load_merges=False, - special_token_types = ['prefix', 'suffix', 'middle', 'eot'] - ) - special_vocab._set_special_token("prefix", 32007) - special_vocab._set_special_token("suffix", 32008) - special_vocab._set_special_token("middle", 32009) - special_vocab._set_special_token("eot", 32010) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - super().set_gguf_parameters() - hparams = self.hparams - self.gguf_writer.add_vocab_size(hparams["vocab_size"]) - - if "head_dim" in hparams: - rope_dim = hparams["head_dim"] - else: - rope_dim = hparams["hidden_size"] // hparams["num_attention_heads"] - self.gguf_writer.add_rope_dimension_count(rope_dim) - - if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "linear": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - if "add_prefix_space" in tokenizer_config_json: - self.gguf_writer.add_add_space_prefix(tokenizer_config_json["add_prefix_space"]) - - # Apply to granite small models only - if self.hparams.get("vocab_size", 32000) == 49152: - self.gguf_writer.add_add_bos_token(False) - - @staticmethod - def permute(weights: 
Tensor, n_head: int, n_head_kv: int | None): - if n_head_kv is not None and n_head != n_head_kv: - n_head = n_head_kv - return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape)) - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams.get("num_key_value_heads") - - if name.endswith(("q_proj.weight", "q_proj.bias")): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) - if name.endswith(("k_proj.weight", "k_proj.bias")): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - - # process the experts separately - if name.find("block_sparse_moe.experts") != -1: - n_experts = self.hparams["num_local_experts"] - - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for wid in ["w1", "w2", "w3"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight" - - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._experts is not None: - # flatten `list[dict[str, Tensor]]` into `list[str]` - experts = [k for d in self._experts for k in d.keys()] - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts}") - - -@Model.register("BitnetForCausalLM") -class BitnetModel(Model): - model_arch = gguf.MODEL_ARCH.BITNET - - def set_vocab(self): - self._set_vocab_sentencepiece() - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(1.0) - - def weight_quant(self, weight): - dtype = weight.dtype - weight = weight.float() - s = 1 / weight.abs().mean().clamp(min=1e-5) - weight = (weight * s).round().clamp(-1, 1) / s - scale = weight.abs().max().unsqueeze(0) - weight = torch.where(weight.abs().less(1e-6), 0, weight).type(dtype) - weight = torch.sign(weight).type(dtype) - return weight.type(dtype), scale.type(torch.float32) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - new_name = self.map_tensor_name(name) - - if any(self.match_model_tensor_name(new_name, key, bid) for key in [ - gguf.MODEL_TENSOR.ATTN_Q, - gguf.MODEL_TENSOR.ATTN_K, - gguf.MODEL_TENSOR.ATTN_V, - gguf.MODEL_TENSOR.ATTN_OUT, - gguf.MODEL_TENSOR.FFN_UP, - gguf.MODEL_TENSOR.FFN_DOWN, - gguf.MODEL_TENSOR.FFN_GATE, - ]): - # transform weight into 1/0/-1 (in fp32) - weight_torch, scale_torch = self.weight_quant(data_torch) - yield (new_name, weight_torch) - yield (new_name.removesuffix(".weight") + ".scale", scale_torch) - else: - yield (new_name, data_torch) - - -@Model.register("GrokForCausalLM") -class GrokModel(Model): - model_arch = gguf.MODEL_ARCH.GROK - - def set_vocab(self): - 
self._set_vocab_sentencepiece() - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def set_gguf_parameters(self): - super().set_gguf_parameters() - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # process the experts separately - if name.find(".moe.") != -1: - n_experts = self.hparams["num_local_experts"] - - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for wid in ["linear", "linear_1", "linear_v"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight" - - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("DbrxForCausalLM") -class DbrxModel(Model): - model_arch = gguf.MODEL_ARCH.DBRX - - def set_gguf_parameters(self): - ffn_config = self.hparams["ffn_config"] - attn_config = self.hparams["attn_config"] - self.gguf_writer.add_block_count(self.hparams["n_layers"]) - - self.gguf_writer.add_context_length(self.hparams["max_seq_len"]) - self.gguf_writer.add_embedding_length(self.hparams["d_model"]) - self.gguf_writer.add_feed_forward_length(ffn_config["ffn_hidden_size"]) - - self.gguf_writer.add_head_count(self.hparams["n_heads"]) - self.gguf_writer.add_head_count_kv(attn_config["kv_n_heads"]) - - self.gguf_writer.add_rope_freq_base(attn_config["rope_theta"]) - - self.gguf_writer.add_clamp_kqv(attn_config["clip_qkv"]) - - self.gguf_writer.add_expert_count(ffn_config["moe_num_experts"]) - self.gguf_writer.add_expert_used_count(ffn_config["moe_top_k"]) - - self.gguf_writer.add_layer_norm_eps(1e-5) - - self.gguf_writer.add_file_type(self.ftype) - logger.info(f"gguf: file type = {self.ftype}") - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_expert = self.hparams["ffn_config"]["moe_num_experts"] - n_ff = self.hparams["ffn_config"]["ffn_hidden_size"] - n_embd = self.hparams["d_model"] - - # Specific behavior for experts tensors: suffix .weight, view as 3D and transpose - # original implementation expects (n_expert, n_ff, n_embd) for all experts weights - # But llama.cpp moe graph works differently - # AND the dimensions in ggml are typically in the reverse order of the pytorch dimensions - # so (n_expert, n_ff, n_embd) in pytorch is {n_embd, n_ff, n_expert} in ggml_tensor - exp_tensor_names = {"ffn.experts.mlp.w1": None, # LLM_TENSOR_FFN_GATE_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} - "ffn.experts.mlp.w2": (0, 2, 1), # LLM_TENSOR_FFN_DOWN_EXPS ggml_tensor->ne{n_ff, n_embd, n_expert} - "ffn.experts.mlp.v1": None} # LLM_TENSOR_FFN_UP_EXPS ggml_tensor->ne{n_embd, n_ff, n_expert} - experts = False - - for exp_tensor_name in exp_tensor_names.keys(): - if name.find(exp_tensor_name) != -1 and name.find(".weight") == -1: - experts = True - data_torch = data_torch.view(n_expert, n_ff, n_embd) - if (permute_tensor := 
exp_tensor_names[exp_tensor_name]) is not None: - data_torch = data_torch.permute(*permute_tensor) - break - - # map tensor names - # In MoE models the ffn tensors are typically most of the model weights, - # and need to be quantizable. Quantize expects tensor names to be suffixed by .weight. - # Every other model has the weight names ending in .weight, - # let's assume that is the convention which is not the case for dbrx: - # https://huggingface.co/databricks/dbrx-instruct/blob/main/model.safetensors.index.json#L15 - new_name = self.map_tensor_name(name if not experts else name + ".weight", try_suffixes=(".weight",)) - - return [(new_name, data_torch)] - - def extra_f16_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del name, new_name, bid # unused - - return n_dims > 1 - - -@Model.register("MiniCPMForCausalLM") -class MiniCPMModel(Model): - model_arch = gguf.MODEL_ARCH.MINICPM - - def set_gguf_parameters(self): - block_count = self.hparams["num_hidden_layers"] - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - def set_vocab(self): - self._set_vocab_llama_hf() - - def _reverse_hf_permute(self, weights: Tensor, n_head: int, n_kv_head: int | None = None) -> Tensor: - if n_kv_head is not None and n_head != n_kv_head: - n_head //= n_kv_head - - return ( - weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape) - ) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams.get("num_key_value_heads") - - # HF models permute some of the tensors, so we need to undo that - if name.endswith(("q_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, n_head, n_head) - if name.endswith(("k_proj.weight")): - data_torch = self._reverse_hf_permute(data_torch, n_head, n_kv_head) - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("QWenLMHeadModel") -class QwenModel(Model): - model_arch = gguf.MODEL_ARCH.QWEN - - @staticmethod - def token_bytes_to_string(b): - from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode - byte_encoder = bytes_to_unicode() - return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')]) - - @staticmethod - def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]: - parts = [bytes([b]) for b in token] - while True: - min_idx = None - min_rank = None - for i, pair in enumerate(zip(parts[:-1], parts[1:])): - rank = mergeable_ranks.get(pair[0] + pair[1]) - if rank is not None and (min_rank is None or rank < min_rank): - min_idx = i - min_rank = rank - if min_rank is None or (max_rank is not None and min_rank >= max_rank): - break - assert min_idx is not None - parts = parts[:min_idx] + [parts[min_idx] + parts[min_idx + 1]] + parts[min_idx + 2:] 
- return parts - - def set_vocab(self): - self._set_vocab_qwen() - - def set_gguf_parameters(self): - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"]) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"]) - self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - -@Model.register("Qwen2ForCausalLM") -class Qwen2Model(Model): - model_arch = gguf.MODEL_ARCH.QWEN2 - - def set_vocab(self): - try: - self._set_vocab_sentencepiece() - except FileNotFoundError: - self._set_vocab_gpt2() - - -@Model.register("Qwen2MoeForCausalLM") -class Qwen2MoeModel(Model): - model_arch = gguf.MODEL_ARCH.QWEN2MOE - - def set_gguf_parameters(self): - super().set_gguf_parameters() - if (n_experts := self.hparams.get("num_experts")) is not None: - self.gguf_writer.add_expert_count(n_experts) - if (moe_intermediate_size := self.hparams.get("moe_intermediate_size")) is not None: - self.gguf_writer.add_expert_feed_forward_length(moe_intermediate_size) - logger.info(f"gguf: expert feed forward length = {moe_intermediate_size}") - if (shared_expert_intermediate_size := self.hparams.get('shared_expert_intermediate_size')) is not None: - self.gguf_writer.add_expert_shared_feed_forward_length(shared_expert_intermediate_size) - logger.info(f"gguf: expert shared feed forward length = {shared_expert_intermediate_size}") - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # process the experts separately - if name.find("experts") != -1: - n_experts = self.hparams["num_experts"] - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for w_name in ["down_proj", "gate_proj", "up_proj"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._experts is not None: - # flatten `list[dict[str, Tensor]]` into `list[str]` - experts = [k for d in self._experts for k in d.keys()] - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts}") - - -@Model.register("GPT2LMHeadModel") -class GPT2Model(Model): - model_arch = gguf.MODEL_ARCH.GPT2 - - def set_gguf_parameters(self): - self.gguf_writer.add_block_count(self.hparams["n_layer"]) - self.gguf_writer.add_context_length(self.hparams["n_ctx"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - 
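Several of the MoE converters in this file (LlamaModel, GrokModel and Qwen2MoeModel above, and others further below) share the same buffering pattern: per-expert 2D weights are collected until a block is complete and then stacked into one 3D tensor. A minimal sketch of that pattern, with invented tensor names and toy sizes:

```python
# Minimal sketch of the expert-merging pattern used by the MoE converters above.
# The tensor names and sizes are invented for illustration.
import torch

n_experts, n_ff, n_embd = 4, 8, 16
buffered = {
    f"model.layers.0.mlp.experts.{xid}.up_proj.weight": torch.randn(n_ff, n_embd)
    for xid in range(n_experts)
}

# once every expert of a block has been seen, stack them along a new leading axis
datas = [buffered.pop(f"model.layers.0.mlp.experts.{xid}.up_proj.weight")
         for xid in range(n_experts)]
merged = torch.stack(datas, dim=0)                   # (n_experts, n_ff, n_embd)

assert merged.shape == (n_experts, n_ff, n_embd) and not buffered
```

The prepare_tensors overrides then only have to check that no buffered expert tensors were left behind.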
self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - tensors: list[tuple[str, Tensor]] = [] - - # we don't need these - if name.endswith((".attn.bias", ".attn.masked_bias")): - return tensors - - if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_proj.weight")): - data_torch = data_torch.transpose(1, 0) - - new_name = self.map_tensor_name(name) - - tensors.append((new_name, data_torch)) - - # note: GPT2 output is tied to (same as) wte in original model - if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - - return tensors - - -@Model.register("PhiForCausalLM") -class Phi2Model(Model): - model_arch = gguf.MODEL_ARCH.PHI2 - - def set_gguf_parameters(self): - block_count = self.find_hparam(["num_hidden_layers", "n_layer"]) - - rot_pct = self.find_hparam(["partial_rotary_factor"]) - n_embd = self.find_hparam(["hidden_size", "n_embd"]) - n_head = self.find_hparam(["num_attention_heads", "n_head"]) - - self.gguf_writer.add_context_length(self.find_hparam(["n_positions", "max_position_embeddings"])) - - self.gguf_writer.add_embedding_length(n_embd) - self.gguf_writer.add_feed_forward_length(4 * n_embd) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head) - self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_epsilon", "layer_norm_eps"])) - self.gguf_writer.add_rope_dimension_count(int(rot_pct * n_embd) // n_head) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_add_bos_token(False) - - -@Model.register("Phi3ForCausalLM") -class Phi3MiniModel(Model): - model_arch = gguf.MODEL_ARCH.PHI3 - - def set_vocab(self): - from sentencepiece import SentencePieceProcessor - - tokenizer_path = self.dir_model / 'tokenizer.model' - - if not tokenizer_path.is_file(): - raise ValueError(f'Error: Missing {tokenizer_path}') - - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] - scores: list[float] = [-10000.0] * vocab_size - toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - - for token_id in range(tokenizer.vocab_size()): - - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE - - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype - - added_tokens_file = self.dir_model / 'added_tokens.json' - if added_tokens_file.is_file(): - with open(added_tokens_file, "r", encoding="utf-8") as f: - added_tokens_json = json.load(f) - - for key in added_tokens_json: - token_id = added_tokens_json[key] - if token_id >= vocab_size: - 
logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}') - continue - - tokens[token_id] = key.encode("utf-8") - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {}) - for token_id, foken_data in added_tokens_decoder.items(): - token_id = int(token_id) - token = foken_data["content"].encode("utf-8") - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token: - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}') - tokens[token_id] = token - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - if foken_data.get("special"): - toktypes[token_id] = SentencePieceTokenTypes.CONTROL - - tokenizer_file = self.dir_model / 'tokenizer.json' - if tokenizer_file.is_file(): - with open(tokenizer_file, "r", encoding="utf-8") as f: - tokenizer_json = json.load(f) - added_tokens = tokenizer_json.get("added_tokens", []) - for foken_data in added_tokens: - token_id = int(foken_data["id"]) - token = foken_data["content"].encode("utf-8") - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token: - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}') - tokens[token_id] = token - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - if foken_data.get("special"): - toktypes[token_id] = SentencePieceTokenTypes.CONTROL - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - block_count = self.find_hparam(["num_hidden_layers", "n_layer"]) - - n_embd = self.find_hparam(["hidden_size", "n_embd"]) - n_head = self.find_hparam(["num_attention_heads", "n_head"]) - n_head_kv = self.find_hparam(["num_key_value_heads", "n_head_kv"]) - rms_eps = self.find_hparam(["rms_norm_eps"]) - max_pos_embds = self.find_hparam(["n_positions", "max_position_embeddings"]) - orig_max_pos_embds = self.find_hparam(["original_max_position_embeddings"]) - rope_dims = n_embd // n_head - - self.gguf_writer.add_context_length(max_pos_embds) - self.gguf_writer.add_rope_scaling_orig_ctx_len(orig_max_pos_embds) - self.gguf_writer.add_embedding_length(n_embd) - self.gguf_writer.add_feed_forward_length(self.find_hparam(["intermediate_size"])) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head_kv) - self.gguf_writer.add_layer_norm_rms_eps(rms_eps) - self.gguf_writer.add_rope_dimension_count(rope_dims) - self.gguf_writer.add_rope_freq_base(self.find_hparam(["rope_theta"])) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_sliding_window(self.find_hparam(["sliding_window"])) - - # write rope scaling for long context (128k) model - rope_scaling = self.find_hparam(['rope_scaling'], True) - if rope_scaling is None: - return - - scale = 
max_pos_embds / orig_max_pos_embds - - rope_scaling_type = rope_scaling.get('type', '').lower() - if len(rope_scaling_type) == 0: - raise KeyError('Missing the required key rope_scaling.type') - - if rope_scaling_type == 'su' or rope_scaling_type == 'longrope': - attn_factor = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds)) if scale > 1.0 else 1.0 - elif rope_scaling_type == 'yarn': - attn_factor = 0.1 * math.log(scale) + 1.0 if scale > 1.0 else 1.0 - else: - raise NotImplementedError(f'The rope scaling type {rope_scaling_type} is not supported yet') - - self.gguf_writer.add_rope_scaling_attn_factors(attn_factor) - - long_factors = rope_scaling.get('long_factor', None) - short_factors = rope_scaling.get('short_factor', None) - - if long_factors is None or short_factors is None: - raise KeyError('Missing the required key rope_scaling.long_factor or rope_scaling_short_factor') - - if len(long_factors) != len(short_factors) or len(long_factors) != rope_dims / 2: - raise ValueError(f'The length of rope long and short factors must be {rope_dims / 2}') - - self.gguf_writer.add_tensor(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ROPE_FACTORS_LONG] + ".weight", np.array(long_factors, dtype=np.float32)) - self.gguf_writer.add_tensor(gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ROPE_FACTORS_SHORT] + ".weight", np.array(short_factors, dtype=np.float32)) - - -@Model.register("PlamoForCausalLM") -class PlamoModel(Model): - model_arch = gguf.MODEL_ARCH.PLAMO - - def set_vocab(self): - self._set_vocab_sentencepiece() - - def set_gguf_parameters(self): - hparams = self.hparams - block_count = hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(4096) # not in config.json - self.gguf_writer.add_embedding_length(hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(5) # hparams["num_key_value_heads"]) is wrong - self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"]) - self.gguf_writer.add_file_type(self.ftype) - - def shuffle_attn_q_weight(self, data_torch): - assert data_torch.size() == (5120, 5120) - data_torch = data_torch.reshape(8, 5, 128, 5120) - data_torch = torch.permute(data_torch, (1, 0, 2, 3)) - data_torch = torch.reshape(data_torch, (5120, 5120)) - return data_torch - - def shuffle_attn_output_weight(self, data_torch): - assert data_torch.size() == (5120, 5120) - data_torch = data_torch.reshape(5120, 8, 5, 128) - data_torch = torch.permute(data_torch, (0, 2, 1, 3)) - data_torch = torch.reshape(data_torch, (5120, 5120)) - return data_torch - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - new_name = self.map_tensor_name(name) - - # shuffle for broadcasting of gqa in ggml_mul_mat - if new_name.endswith("attn_q.weight"): - data_torch = self.shuffle_attn_q_weight(data_torch) - elif new_name.endswith("attn_output.weight"): - data_torch = self.shuffle_attn_output_weight(data_torch) - - return [(new_name, data_torch)] - - -@Model.register("CodeShellForCausalLM") -class CodeShellModel(Model): - model_arch = gguf.MODEL_ARCH.CODESHELL - - def set_gguf_parameters(self): - block_count = self.hparams["n_layer"] - - self.gguf_writer.add_context_length(self.hparams["n_positions"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - self.gguf_writer.add_feed_forward_length(4 * self.hparams["n_embd"]) - 
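To make the Phi3MiniModel attention-factor formulas above concrete, the arithmetic below assumes a 4k original and 128k extended context; both lengths are illustrative values, not read from any config.

```python
# Worked example of the Phi-3 rope-scaling attention factors computed above.
# The context lengths are assumed values for illustration only.
import math

orig_max_pos_embds = 4096                     # hypothetical original context length
max_pos_embds = 131072                        # hypothetical extended context length
scale = max_pos_embds / orig_max_pos_embds    # 32.0

# 'su' / 'longrope' scaling
attn_factor_su = math.sqrt(1 + math.log(scale) / math.log(orig_max_pos_embds))
# 'yarn' scaling
attn_factor_yarn = 0.1 * math.log(scale) + 1.0

print(round(attn_factor_su, 4), round(attn_factor_yarn, 4))  # ~1.1902 and ~1.3466
```

When scale is not greater than 1.0, both factors stay at 1.0, matching the guard in the code above.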
self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_query_groups"]) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_rope_freq_base(10000.0) - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(1.0) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - new_name = self.map_tensor_name(name) - - tensors: list[tuple[str, Tensor]] = [(new_name, data_torch)] - - if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): - assert self.tensor_names is not None - - if all(s not in self.tensor_names for s in ("lm_head.weight", "output.weight")): - # copy tok_embd.weight to output.weight - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch)) - - return tensors - - -@Model.register("InternLM2ForCausalLM") -class InternLM2Model(Model): - model_arch = gguf.MODEL_ARCH.INTERNLM2 - - def set_vocab(self): - # (TODO): Is there a better way? - # Copy from _set_vocab_sentencepiece, The only difference is that we will treat the character - # \x00 specially and convert it into an emoji character to prevent it from being mistakenly - # recognized as an empty string in C++. - from sentencepiece import SentencePieceProcessor - from sentencepiece import sentencepiece_model_pb2 as model - - tokenizer_path = self.dir_model / 'tokenizer.model' - - tokens: list[bytes] = [] - scores: list[float] = [] - toktypes: list[int] = [] - - if not tokenizer_path.is_file(): - logger.error(f'Error: Missing {tokenizer_path}') - sys.exit(1) - - sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] - sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) - add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix - - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - for token_id in range(vocab_size): - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) - if text == b"\x00": - # (TODO): fixme - # Hack here and replace the \x00 characters. 
- logger.warning(f"InternLM2 convert token '{text}' to '🐉'!") - text = "🐉".encode("utf-8") - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE - # take care of ununsed raw token - if piece.startswith('[UNUSED'): - toktype = SentencePieceTokenTypes.UNUSED - - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - - added_tokens_file = self.dir_model / 'added_tokens.json' - if added_tokens_file.is_file(): - with open(added_tokens_file, "r", encoding="utf-8") as f: - added_tokens_json = json.load(f) - - for key in added_tokens_json: - tokens.append(key.encode("utf-8")) - scores.append(-1000.0) - toktypes.append(SentencePieceTokenTypes.USER_DEFINED) - - chat_eos_token = '<|im_end|>' - chat_eos_token_id = None - - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - added_tokens_decoder = tokenizer_config_json.get("added_tokens_decoder", {}) - for token_id, foken_data in added_tokens_decoder.items(): - token_id = int(token_id) - token = foken_data["content"] - if token == chat_eos_token: - chat_eos_token_id = token_id - token = token.encode("utf-8") - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token: - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}') - tokens[token_id] = token - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - if foken_data.get("special"): - toktypes[token_id] = SentencePieceTokenTypes.CONTROL - - tokenizer_file = self.dir_model / 'tokenizer.json' - if tokenizer_file.is_file(): - with open(tokenizer_file, "r", encoding="utf-8") as f: - tokenizer_json = json.load(f) - added_tokens = tokenizer_json.get("added_tokens", []) - for foken_data in added_tokens: - token_id = int(foken_data["id"]) - token = foken_data["content"] - if token == chat_eos_token: - chat_eos_token_id = token_id - token = token.encode("utf-8") - if toktypes[token_id] != SentencePieceTokenTypes.UNUSED: - if tokens[token_id] != token: - logger.warning(f'replacing token {token_id}: {tokens[token_id].decode("utf-8")!r} -> {token.decode("utf-8")!r}') - tokens[token_id] = token - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - if foken_data.get("special"): - toktypes[token_id] = SentencePieceTokenTypes.CONTROL - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - self.gguf_writer.add_add_space_prefix(add_prefix) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - old_eos = special_vocab.special_token_ids["eos"] - if chat_eos_token_id is not None: - # For the chat model, we replace the eos with '<|im_end|>'. 
- # TODO: this is a hack, should be fixed - # https://github.com/ggerganov/llama.cpp/pull/6745#issuecomment-2067687048 - special_vocab.special_token_ids["eos"] = chat_eos_token_id - logger.warning(f"Replace eos:{old_eos} with a special token:{chat_eos_token_id}" - " in chat mode so that the conversation can end normally.") - - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"]) - self.gguf_writer.add_block_count(self.hparams["num_hidden_layers"]) - self.gguf_writer.add_embedding_length(self.hparams["hidden_size"]) - self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"]) - self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"]) - self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) - self.gguf_writer.add_file_type(self.ftype) - if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "linear": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.LINEAR) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - num_heads = self.hparams["num_attention_heads"] - num_kv_heads = self.hparams["num_key_value_heads"] - n_embd = self.hparams["hidden_size"] - q_per_kv = num_heads // num_kv_heads - head_dim = n_embd // num_heads - num_groups = num_heads // q_per_kv - - if bid is not None and f"model.layers.{bid}.attention.wqkv" in name: - qkv = data_torch - - qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd)) - q, k, v = qkv[:, : q_per_kv], qkv[:, -2], qkv[:, -1] - - # The model weights of q and k require additional reshape.
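The wqkv split above is easier to follow with concrete numbers; the sketch below uses invented toy dimensions and shows the grouped tensor separating into q, k and v blocks before the permute applied in the code that follows.

```python
# Toy-sized illustration of the InternLM2 wqkv split above (invented dimensions).
import torch

num_heads, num_kv_heads, head_dim = 8, 2, 4
n_embd = num_heads * head_dim
q_per_kv = num_heads // num_kv_heads          # query heads per kv group
num_groups = num_heads // q_per_kv            # == num_kv_heads

qkv = torch.randn(num_groups * (q_per_kv + 2) * head_dim, n_embd)
qkv = qkv.reshape((num_groups, q_per_kv + 2, head_dim, n_embd))

q = qkv[:, : q_per_kv].reshape((-1, n_embd))  # (num_heads * head_dim, n_embd)
k = qkv[:, -2].reshape((-1, n_embd))          # (num_kv_heads * head_dim, n_embd)
v = qkv[:, -1].reshape((-1, n_embd))

assert q.shape[0] == num_heads * head_dim and k.shape == v.shape
```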
- q = LlamaModel.permute(q.reshape((-1, q.shape[-1])), num_heads, num_heads) - k = LlamaModel.permute(k.reshape((-1, k.shape[-1])), num_heads, num_kv_heads) - v = v.reshape((-1, v.shape[-1])) - - return [ - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_Q, bid), q), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_K, bid), k), - (self.format_tensor_name(gguf.MODEL_TENSOR.ATTN_V, bid), v), - ] - else: - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("BertModel", "CamembertModel") -class BertModel(Model): - model_arch = gguf.MODEL_ARCH.BERT - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.vocab_size = None - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_causal_attention(False) - - # get pooling path - pooling_path = None - module_path = self.dir_model / "modules.json" - if module_path.is_file(): - with open(module_path, encoding="utf-8") as f: - modules = json.load(f) - for mod in modules: - if mod["type"] == "sentence_transformers.models.Pooling": - pooling_path = mod["path"] - break - - # get pooling type - if pooling_path is not None: - with open(self.dir_model / pooling_path / "config.json", encoding="utf-8") as f: - pooling = json.load(f) - if pooling["pooling_mode_mean_tokens"]: - pooling_type = gguf.PoolingType.MEAN - elif pooling["pooling_mode_cls_token"]: - pooling_type = gguf.PoolingType.CLS - else: - raise NotImplementedError("Only MEAN and CLS pooling types supported") - self.gguf_writer.add_pooling_type(pooling_type) - - def set_vocab(self): - tokens, toktypes, tokpre = self.get_vocab_base() - self.vocab_size = len(tokens) - - # we need this to validate the size of the token_type embeddings - # though currently we are passing all zeros to the token_type embeddings - self.gguf_writer.add_token_type_count(2) # "Sequence A" or "Sequence B" - - # convert to phantom space vocab - def phantom(tok): - if tok.startswith("[") and tok.endswith("]"): - return tok - if tok.startswith("##"): - return tok[2:] - return "\u2581" + tok - tokens = list(map(phantom, tokens)) - - # add vocab to gguf - self.gguf_writer.add_tokenizer_model("bert") - self.gguf_writer.add_tokenizer_pre(tokpre) - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - # handle special tokens - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # we are only using BERT for embeddings so we don't need the pooling layer - if name in ("embeddings.position_ids", "pooler.dense.weight", "pooler.dense.bias"): - return [] # we don't need these - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("NomicBertModel") -class NomicBertModel(BertModel): - model_arch = gguf.MODEL_ARCH.NOMIC_BERT - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # the HF config claims n_ctx=8192, but it uses RoPE scaling - self.hparams["n_ctx"] = 2048 - - # SwigLU activation - assert self.hparams["activation_function"] == "swiglu" - # this doesn't do anything in the HF version - assert self.hparams["causal"] is False - # no bias tensors - assert self.hparams["qkv_proj_bias"] is False - assert self.hparams["mlp_fc1_bias"] is False - assert self.hparams["mlp_fc2_bias"] is False - # norm at end of layer - assert self.hparams["prenorm"] is False - # standard RoPE - assert 
self.hparams["rotary_emb_fraction"] == 1.0 - assert self.hparams["rotary_emb_interleaved"] is False - assert self.hparams["rotary_emb_scale_base"] is None - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_rope_freq_base(self.hparams["rotary_emb_base"]) - - -@Model.register("GemmaForCausalLM") -class GemmaModel(Model): - model_arch = gguf.MODEL_ARCH.GEMMA - - def set_vocab(self): - self._set_vocab_sentencepiece() - - # TODO: these special tokens should be exported only for the CodeGemma family - special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False, - special_token_types = ['prefix', 'suffix', 'middle', 'fsep', 'eot']) - special_vocab._set_special_token("prefix", 67) - special_vocab._set_special_token("suffix", 69) - special_vocab._set_special_token("middle", 68) - special_vocab._set_special_token("fsep", 70) - special_vocab._set_special_token("eot", 107) - special_vocab.chat_template = None # do not add it twice - special_vocab.add_to_gguf(self.gguf_writer) - - self.gguf_writer.add_add_space_prefix(False) - - def set_gguf_parameters(self): - hparams = self.hparams - block_count = hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) - self.gguf_writer.add_head_count(hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_key_length(hparams["head_dim"]) - self.gguf_writer.add_value_length(hparams["head_dim"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model - # To prevent errors, skip loading lm_head.weight. 
- if name == "lm_head.weight": - logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") - return [] - - # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 - if name.endswith("norm.weight"): - data_torch = data_torch + 1 - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("Gemma2ForCausalLM") -class Gemma2Model(Model): - model_arch = gguf.MODEL_ARCH.GEMMA2 - - def set_vocab(self): - self._set_vocab_sentencepiece() - - self.gguf_writer.add_add_space_prefix(False) - - def set_gguf_parameters(self): - hparams = self.hparams - block_count = hparams["num_hidden_layers"] - - self.gguf_writer.add_context_length(hparams["max_position_embeddings"]) - self.gguf_writer.add_embedding_length(hparams["hidden_size"]) - self.gguf_writer.add_block_count(block_count) - self.gguf_writer.add_feed_forward_length(hparams["intermediate_size"]) - self.gguf_writer.add_head_count(hparams["num_attention_heads"]) - self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"] if "num_key_value_heads" in hparams else hparams["num_attention_heads"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) - self.gguf_writer.add_key_length(hparams["head_dim"]) - self.gguf_writer.add_value_length(hparams["head_dim"]) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_attn_logit_softcapping( - self.hparams["attn_logit_softcapping"] - ) - self.gguf_writer.add_final_logit_softcapping( - self.hparams["final_logit_softcapping"] - ) - self.gguf_writer.add_sliding_window(self.hparams["sliding_window"]) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # lm_head is not used in llama.cpp, while autoawq will include this tensor in model - # To prevent errors, skip loading lm_head.weight. 
- if name == "lm_head.weight": - logger.debug(f"Skipping get tensor {name!r} in safetensors so that convert can end normally.") - return [] - - # ref: https://github.com/huggingface/transformers/blob/fc37f38915372c15992b540dfcbbe00a916d4fc6/src/transformers/models/gemma/modeling_gemma.py#L89 - if name.endswith("norm.weight"): - data_torch = data_torch + 1 - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("Starcoder2ForCausalLM") -class StarCoder2Model(Model): - model_arch = gguf.MODEL_ARCH.STARCODER2 - - -@Model.register("MambaForCausalLM", "MambaLMHeadModel") -class MambaModel(Model): - model_arch = gguf.MODEL_ARCH.MAMBA - - def set_vocab(self): - vocab_size = self.hparams["vocab_size"] - # Round vocab size to next multiple of 8 - pad_vocab = self.hparams.get("pad_vocab_size_multiple", 8) - # pad using ceiling division - # ref: https://stackoverflow.com/a/17511341/22827863 - vocab_size = -(vocab_size // -pad_vocab) * pad_vocab - self.hparams["vocab_size"] = vocab_size - - if (self.dir_model / "tokenizer.json").is_file(): - self._set_vocab_gpt2() - elif (self.dir_model / "tokenizer.model").is_file(): - self._set_vocab_sentencepiece() - else: - # Use the GPT-NeoX tokenizer when no tokenizer files are present - self._set_vocab_builtin("gpt-neox", vocab_size) - - def set_gguf_parameters(self): - d_model = self.find_hparam(["hidden_size", "d_model"]) - d_conv = self.find_hparam(["conv_kernel", "d_conv"], optional=True) or 4 - d_inner = self.find_hparam(["intermediate_size", "d_inner"], optional=True) or 2 * d_model - d_state = self.find_hparam(["state_size", "d_state"], optional=True) or 16 - # ceiling division - # ref: https://stackoverflow.com/a/17511341/22827863 - # ref: https://github.com/state-spaces/mamba/blob/ce59daea3a090d011d6476c6e5b97f6d58ddad8b/mamba_ssm/modules/mamba_simple.py#L58 - dt_rank = self.find_hparam(["time_step_rank", "dt_rank"], optional=True) or -(d_model // -16) - rms_norm_eps = self.find_hparam(["layer_norm_epsilon", "rms_norm_eps"], optional=True) or 1e-5 - - # Fail early for models which don't have a block expansion factor of 2 - assert d_inner == 2 * d_model - - self.gguf_writer.add_context_length(2**20) # arbitrary value; for those who use the default - self.gguf_writer.add_embedding_length(d_model) - self.gguf_writer.add_feed_forward_length(0) # unused, but seemingly required when loading - self.gguf_writer.add_head_count(0) # unused, but seemingly required when loading - self.gguf_writer.add_block_count(self.hparams["n_layer"]) - self.gguf_writer.add_ssm_conv_kernel(d_conv) - self.gguf_writer.add_ssm_inner_size(d_inner) - self.gguf_writer.add_ssm_state_size(d_state) - self.gguf_writer.add_ssm_time_step_rank(dt_rank) - self.gguf_writer.add_layer_norm_rms_eps(rms_norm_eps) - self.gguf_writer.add_file_type(self.ftype) - - _tok_embd = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - output_name = self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT) - tok_embd_name = self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD) - - new_name = self.map_tensor_name(name) - - if name.endswith(".A_log"): - logger.debug("A_log --> A ==> " + new_name) - data_torch = -torch.exp(data_torch) - - # assuming token_embd.weight is seen before output.weight - if self._tok_embd is not None and new_name == output_name: - if torch.equal(self._tok_embd, data_torch): - logger.debug(f"{output_name} is equivalent to {tok_embd_name}, omitting") - return [] - elif new_name == 
tok_embd_name: - self._tok_embd = data_torch - - return [(new_name, data_torch)] - - def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool: - del n_dims # unused - - return bid is not None and new_name in ( - self.format_tensor_name(n, bid, ".weight" if name.endswith(".weight") else "") for n in [ - gguf.MODEL_TENSOR.SSM_CONV1D, - gguf.MODEL_TENSOR.SSM_X, - gguf.MODEL_TENSOR.SSM_DT, - gguf.MODEL_TENSOR.SSM_A, - gguf.MODEL_TENSOR.SSM_D, - ] - ) - - -@Model.register("CohereForCausalLM") -class CommandR2Model(Model): - model_arch = gguf.MODEL_ARCH.COMMAND_R - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # max_position_embeddings = 8192 in config.json but model was actually - # trained on 128k context length - # aya-23 models don't have model_max_length specified - self.hparams["max_position_embeddings"] = self.find_hparam(["model_max_length", "max_position_embeddings"]) - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_logit_scale(self.hparams["logit_scale"]) - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.NONE) - - -@Model.register("OlmoForCausalLM") -@Model.register("OLMoForCausalLM") -class OlmoModel(Model): - model_arch = gguf.MODEL_ARCH.OLMO - - def set_gguf_parameters(self): - super().set_gguf_parameters() - self.gguf_writer.add_layer_norm_eps(1e-5) - clip_qkv = self.hparams.get("clip_qkv") - if clip_qkv is not None: - self.gguf_writer.add_clamp_kqv(clip_qkv) - - # Same as super class, but permuting q_proj, k_proj - # Copied from: LlamaModel - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams.get("num_key_value_heads") - - if name.endswith("q_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) - if name.endswith("k_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("JinaBertModel", "JinaBertForMaskedLM") -class JinaBertV2Model(BertModel): - model_arch = gguf.MODEL_ARCH.JINA_BERT_V2 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.intermediate_size = self.hparams["intermediate_size"] - - def get_tensors(self): - for name, data in super().get_tensors(): - if 'gated_layer' in name: - d1 = data[:self.intermediate_size, :] - name1 = name.replace('gated_layers', 'gated_layers_w') - name1 = name1.replace('up_gated_layer', 'gated_layers_v') - d2 = data[self.intermediate_size:, :] - name2 = name.replace('gated_layers', 'gated_layers_v') - name2 = name2.replace('up_gated_layer', 'gated_layers_w') - yield name1, d1 - yield name2, d2 - continue - - yield name, data - - def set_vocab(self): - tokenizer_class = 'BertTokenizer' - with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f: - tokenizer_class = json.load(f)['tokenizer_class'] - - if tokenizer_class == 'BertTokenizer': - super().set_vocab() - elif tokenizer_class == 'RobertaTokenizer': - self._set_vocab_gpt2() - self.gguf_writer.add_token_type_count(2) - else: - raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel') - self.gguf_writer.add_add_bos_token(True) - self.gguf_writer.add_add_eos_token(True) - - -@Model.register("OpenELMForCausalLM") -class OpenELMModel(Model): - model_arch = gguf.MODEL_ARCH.OPENELM - - @staticmethod - def 
_make_divisible(v: float | int, divisor: int) -> int: - # ref: https://huggingface.co/apple/OpenELM-270M-Instruct/blob/eb111ff2e6724348e5b905984063d4064d4bc579/configuration_openelm.py#L34-L38 - new_v = max(divisor, int(v + divisor / 2) // divisor * divisor) - # Make sure that round down does not go down by more than 10%. - if new_v < 0.9 * v: - new_v += divisor - return new_v - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - ffn_multipliers: list[float] = self.hparams["ffn_multipliers"] - ffn_dim_divisor: int = self.hparams["ffn_dim_divisor"] - self._n_embd: int = self.hparams["model_dim"] - self._num_kv_heads: list[int] = self.hparams["num_kv_heads"] - self._num_query_heads: list[int] = self.hparams["num_query_heads"] - self._ffn_dims: list[int] = [ - OpenELMModel._make_divisible(multiplier * self._n_embd, ffn_dim_divisor) - for multiplier in ffn_multipliers - ] - assert isinstance(self._num_kv_heads, list) and isinstance(self._num_kv_heads[0], int) - assert isinstance(self._num_query_heads, list) and isinstance(self._num_query_heads[0], int) - - # Uses the tokenizer from meta-llama/Llama-2-7b-hf - def set_vocab(self): - try: - self._set_vocab_sentencepiece() - except FileNotFoundError: - self._set_vocab_builtin("llama-spm", self.hparams["vocab_size"]) - - def set_gguf_parameters(self): - n_embd = self._n_embd - head_dim = self.hparams["head_dim"] - rot_pct = 1.0 - assert self.block_count == len(self._num_kv_heads) - assert self.block_count == len(self._num_query_heads) - assert self.block_count == len(self._ffn_dims) - - self.gguf_writer.add_block_count(self.block_count) - self.gguf_writer.add_context_length(self.hparams["max_context_length"]) - self.gguf_writer.add_embedding_length(n_embd) - self.gguf_writer.add_feed_forward_length(self._ffn_dims) - self.gguf_writer.add_head_count(self._num_query_heads) - self.gguf_writer.add_head_count_kv(self._num_kv_heads) - self.gguf_writer.add_rope_freq_base(self.hparams["rope_freq_constant"]) - # https://huggingface.co/apple/OpenELM-270M-Instruct/blob/c401df2/modeling_openelm.py#L30 - self.gguf_writer.add_layer_norm_rms_eps(1e-6) - self.gguf_writer.add_rope_dimension_count(int(rot_pct * head_dim)) - self.gguf_writer.add_key_length(head_dim) - self.gguf_writer.add_value_length(head_dim) - self.gguf_writer.add_file_type(self.ftype) - - def find_hparam(self, keys: Iterable[str], optional: bool = False) -> Any: - if "n_layers" in keys: - return self.hparams["num_transformer_layers"] - - return super().find_hparam(keys, optional) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - - # split ff - if bid is not None and name == f"transformer.layers.{bid}.ffn.proj_1.weight": - ff_dim = self._ffn_dims[bid] - yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_GATE, bid), data_torch[:ff_dim]) - yield (self.format_tensor_name(gguf.MODEL_TENSOR.FFN_UP, bid), data_torch[ff_dim:]) - return - - yield (self.map_tensor_name(name), data_torch) - - -@Model.register("ArcticForCausalLM") -class ArcticModel(Model): - model_arch = gguf.MODEL_ARCH.ARCTIC - - def set_vocab(self): - # The reason for using a custom implementation here is that the - # snowflake-arctic-instruct model redefined tokens 31998 and 31999 from - # tokenizer.model and used them as BOS and EOS instead of adding new tokens. 
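As a rough illustration of the situation described in the comment above, a tokenizer_config.json can redefine an id that already exists in tokenizer.model via its added_tokens_decoder field; the fragment below is a hypothetical shape with placeholder content strings, not the real snowflake-arctic tokenizer config.

```python
# Hypothetical added_tokens_decoder fragment redefining existing token ids.
# The content strings are placeholders, not the real Arctic special tokens.
import json

config = json.loads("""
{
  "unk_token": "<unk>",
  "added_tokens_decoder": {
    "31998": {"content": "<EXAMPLE_BOS>", "special": true},
    "31999": {"content": "<EXAMPLE_EOS>", "special": true}
  }
}
""")

for token_id, token_json in config["added_tokens_decoder"].items():
    # special tokens are re-typed as CONTROL (UNKNOWN for unk_token) with score 0.0,
    # which is what ArcticModel.set_vocab below does for the real entries
    print(int(token_id), token_json["content"], token_json.get("special", False))
```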
- from sentencepiece import SentencePieceProcessor - - tokenizer_path = self.dir_model / 'tokenizer.model' - - if not tokenizer_path.is_file(): - logger.error(f'Error: Missing {tokenizer_path}') - sys.exit(1) - - # Read the whole vocabulary from the tokenizer.model file - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] - scores: list[float] = [-10000.0] * vocab_size - toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - - for token_id in range(tokenizer.vocab_size()): - - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = SentencePieceTokenTypes.BYTE - - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype - - # Use the added_tokens_decoder field from tokeniser_config.json as the source - # of information about added/redefined tokens and modify them accordingly. - tokenizer_config_file = self.dir_model / 'tokenizer_config.json' - if tokenizer_config_file.is_file(): - with open(tokenizer_config_file, "r", encoding="utf-8") as f: - tokenizer_config_json = json.load(f) - - if "added_tokens_decoder" in tokenizer_config_json: - added_tokens_decoder = tokenizer_config_json["added_tokens_decoder"] - for token_id, token_json in added_tokens_decoder.items(): - token_id = int(token_id) - if token_id >= vocab_size: - logger.debug(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}') - continue - - token_content = token_json["content"] - token_type = SentencePieceTokenTypes.USER_DEFINED - token_score = -10000.0 - - # Map unk_token to UNKNOWN, other special tokens to CONTROL - # Set the score to 0.0 as in the original tokenizer.model - if ("special" in token_json) and token_json["special"]: - if token_content == tokenizer_config_json["unk_token"]: - token_type = SentencePieceTokenTypes.UNKNOWN - else: - token_type = SentencePieceTokenTypes.CONTROL - token_score = 0.0 - - logger.info(f"Setting added token {token_id} to '{token_content}' (type: {token_type}, score: {token_score:.2f})") - tokens[token_id] = token_content.encode("utf-8") - toktypes[token_id] = token_type - scores[token_id] = token_score - - self.gguf_writer.add_tokenizer_model("llama") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - super().set_gguf_parameters() - hparams = self.hparams - self.gguf_writer.add_vocab_size(hparams["vocab_size"]) - self.gguf_writer.add_rope_dimension_count(hparams["hidden_size"] // hparams["num_attention_heads"]) - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - n_head = self.hparams["num_attention_heads"] - n_kv_head = self.hparams.get("num_key_value_heads") - - if 
name.endswith("q_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_head) - if name.endswith("k_proj.weight"): - data_torch = LlamaModel.permute(data_torch, n_head, n_kv_head) - - # process the experts separately - if name.find("block_sparse_moe.experts") != -1: - n_experts = self.hparams["num_local_experts"] - - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for wid in ["w1", "w2", "w3"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.{wid}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"layers.{bid}.feed_forward.experts.{wid}.weight" - - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._experts is not None: - # flatten `list[dict[str, Tensor]]` into `list[str]` - experts = [k for d in self._experts for k in d.keys()] - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts}") - - -@Model.register("DeepseekV2ForCausalLM") -class DeepseekV2Model(Model): - model_arch = gguf.MODEL_ARCH.DEEPSEEK2 - - def set_vocab(self): - self._set_vocab_gpt2() - - def set_gguf_parameters(self): - super().set_gguf_parameters() - hparams = self.hparams - - self.gguf_writer.add_leading_dense_block_count(hparams["first_k_dense_replace"]) - self.gguf_writer.add_vocab_size(hparams["vocab_size"]) - if "q_lora_rank" in hparams and hparams["q_lora_rank"] is not None: - self.gguf_writer.add_q_lora_rank(hparams["q_lora_rank"]) - self.gguf_writer.add_kv_lora_rank(hparams["kv_lora_rank"]) - self.gguf_writer.add_key_length(hparams["qk_nope_head_dim"] + hparams["qk_rope_head_dim"]) - self.gguf_writer.add_value_length(hparams["v_head_dim"]) - self.gguf_writer.add_expert_feed_forward_length(hparams["moe_intermediate_size"]) - self.gguf_writer.add_expert_count(hparams["n_routed_experts"]) - self.gguf_writer.add_expert_shared_count(hparams["n_shared_experts"]) - self.gguf_writer.add_expert_weights_scale(hparams["routed_scaling_factor"]) - self.gguf_writer.add_rope_dimension_count(hparams["qk_rope_head_dim"]) - - if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: - if self.hparams["rope_scaling"].get("type") == "yarn": - self.gguf_writer.add_rope_scaling_type(gguf.RopeScalingType.YARN) - self.gguf_writer.add_rope_scaling_factor(self.hparams["rope_scaling"]["factor"]) - self.gguf_writer.add_rope_scaling_orig_ctx_len(self.hparams["rope_scaling"]["original_max_position_embeddings"]) - self.gguf_writer.add_rope_scaling_yarn_log_mul(0.1 * hparams["rope_scaling"]["mscale_all_dim"]) - - _experts: list[dict[str, Tensor]] | None = None - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - # process the experts separately - if name.find("mlp.experts") != -1: - n_experts = self.hparams["n_routed_experts"] - assert bid is not None - - if self._experts is None: - self._experts = [{} for _ in range(self.block_count)] - - self._experts[bid][name] = data_torch - - if len(self._experts[bid]) >= 
n_experts * 3: - tensors: list[tuple[str, Tensor]] = [] - - # merge the experts into a single 3d tensor - for w_name in ["down_proj", "gate_proj", "up_proj"]: - datas: list[Tensor] = [] - - for xid in range(n_experts): - ename = f"model.layers.{bid}.mlp.experts.{xid}.{w_name}.weight" - datas.append(self._experts[bid][ename]) - del self._experts[bid][ename] - - data_torch = torch.stack(datas, dim=0) - - merged_name = f"model.layers.{bid}.mlp.experts.{w_name}.weight" - - new_name = self.map_tensor_name(merged_name) - - tensors.append((new_name, data_torch)) - return tensors - else: - return [] - - return [(self.map_tensor_name(name), data_torch)] - - def prepare_tensors(self): - super().prepare_tensors() - - if self._experts is not None: - # flatten `list[dict[str, Tensor]]` into `list[str]` - experts = [k for d in self._experts for k in d.keys()] - if len(experts) > 0: - raise ValueError(f"Unprocessed experts: {experts}") - - -@Model.register("T5WithLMHeadModel") -@Model.register("T5ForConditionalGeneration") -@Model.register("MT5ForConditionalGeneration") -@Model.register("UMT5ForConditionalGeneration") -class T5Model(Model): - model_arch = gguf.MODEL_ARCH.T5 - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.shared_token_embeddings_found = False - - def set_vocab(self): - # to avoid TypeError: Descriptors cannot be created directly - # exception when importing sentencepiece_model_pb2 - os.environ["PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION"] = "python" - from sentencepiece import SentencePieceProcessor - from sentencepiece import sentencepiece_model_pb2 as model - - tokenizer_path = self.dir_model / 'tokenizer.model' - - # many older models use spiece.model tokenizer model filename - if not tokenizer_path.is_file(): - tokenizer_path = self.dir_model / 'spiece.model' - - if not tokenizer_path.is_file(): - raise FileNotFoundError(f"File not found: {tokenizer_path}") - - sentencepiece_model = model.ModelProto() # pyright: ignore[reportAttributeAccessIssue] - sentencepiece_model.ParseFromString(open(tokenizer_path, "rb").read()) - - # some models like Pile-T5 family use BPE tokenizer instead of Unigram - if sentencepiece_model.trainer_spec.model_type == 2: # BPE - # assure the tokenizer model file name is correct - assert tokenizer_path.name == 'tokenizer.model' - return self._set_vocab_sentencepiece() - else: - assert sentencepiece_model.trainer_spec.model_type == 1 # UNIGRAM - - add_prefix = sentencepiece_model.normalizer_spec.add_dummy_prefix - remove_whitespaces = sentencepiece_model.normalizer_spec.remove_extra_whitespaces - precompiled_charsmap = sentencepiece_model.normalizer_spec.precompiled_charsmap - - tokenizer = SentencePieceProcessor() - tokenizer.LoadFromFile(str(tokenizer_path)) - - vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - - tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)] - scores: list[float] = [-10000.0] * vocab_size - toktypes: list[int] = [SentencePieceTokenTypes.UNUSED] * vocab_size - - for token_id in range(tokenizer.vocab_size()): - piece = tokenizer.IdToPiece(token_id) - text = piece.encode("utf-8") - score = tokenizer.GetScore(token_id) - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.IsUnknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.IsControl(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.IsUnused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.IsByte(token_id): - toktype = 
SentencePieceTokenTypes.BYTE - - tokens[token_id] = text - scores[token_id] = score - toktypes[token_id] = toktype - - added_tokens_file = self.dir_model / 'added_tokens.json' - if added_tokens_file.is_file(): - with open(added_tokens_file, "r", encoding="utf-8") as f: - added_tokens_json = json.load(f) - for key in added_tokens_json: - token_id = added_tokens_json[key] - if token_id >= vocab_size: - logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}') - continue - - tokens[token_id] = key.encode("utf-8") - scores[token_id] = -1000.0 - toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED - - if vocab_size > len(tokens): - pad_count = vocab_size - len(tokens) - logger.debug(f"Padding vocab with {pad_count} token(s) - [PAD1] through [PAD{pad_count}]") - for i in range(1, pad_count + 1): - tokens.append(bytes(f"[PAD{i}]", encoding="utf-8")) - scores.append(-1000.0) - toktypes.append(SentencePieceTokenTypes.UNUSED) - - self.gguf_writer.add_tokenizer_model("t5") - self.gguf_writer.add_tokenizer_pre("default") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - self.gguf_writer.add_add_space_prefix(add_prefix) - self.gguf_writer.add_remove_extra_whitespaces(remove_whitespaces) - if precompiled_charsmap: - self.gguf_writer.add_precompiled_charsmap(precompiled_charsmap) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - self.gguf_writer.add_add_bos_token(False) - self.gguf_writer.add_add_eos_token(True) - - def set_gguf_parameters(self): - if (n_ctx := self.find_hparam(["n_positions"], optional=True)) is None: - logger.warning("Couldn't find context length in config.json, assuming default value of 512") - n_ctx = 512 - self.gguf_writer.add_context_length(n_ctx) - self.gguf_writer.add_embedding_length(self.hparams["d_model"]) - self.gguf_writer.add_feed_forward_length(self.hparams["d_ff"]) - self.gguf_writer.add_block_count(self.hparams["num_layers"]) - self.gguf_writer.add_head_count(self.hparams["num_heads"]) - self.gguf_writer.add_key_length(self.hparams["d_kv"]) - self.gguf_writer.add_value_length(self.hparams["d_kv"]) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_relative_attn_buckets_count(self.hparams["relative_attention_num_buckets"]) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_decoder_start_token_id(self.hparams["decoder_start_token_id"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - # T5 based models contain shared token embeddings tensors saved randomly as either "encoder.embed_tokens.weight", - # "decoder.embed_tokens.weight" or "shared.weight" tensor. In some models there are even multiple of them stored - # in the safetensors files. We use the first tensor from these three as the token embeddings for both encoder - # and decoder and ignore the remaining ones. 
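# Illustrative sketch, not part of the original converter: the expert-merging
# pattern used by the Arctic and DeepSeek-V2 classes above. Per-expert 2D weights
# are buffered until every expert of a block has been seen, then stacked into a
# single 3D tensor along a new leading dimension. Sizes below are tiny stand-ins.
import torch

n_experts, n_ff, n_embd = 4, 64, 32
per_expert = [torch.randn(n_ff, n_embd) for _ in range(n_experts)]   # e.g. one gate_proj per expert
merged = torch.stack(per_expert, dim=0)                              # one tensor per projection type
assert merged.shape == (n_experts, n_ff, n_embd)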
- if name in ["decoder.embed_tokens.weight", "encoder.embed_tokens.weight", "shared.weight"]: - if not self.shared_token_embeddings_found: - name = "shared.weight" - self.shared_token_embeddings_found = True - else: - logger.debug(f"Skipping shared tensor {name!r} in safetensors so that convert can end normally.") - return [] - - return [(self.map_tensor_name(name), data_torch)] - - -@Model.register("JAISLMHeadModel") -class JaisModel(Model): - model_arch = gguf.MODEL_ARCH.JAIS - - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - # SwigLU activation - assert self.hparams["activation_function"] == "swiglu" - # ALiBi position embedding - assert self.hparams["position_embedding_type"] == "alibi" - - # Embeddings scale - self.embeddings_scale = 1.0 - # note: For some JAIS flavors, output is tied to (same as) wte in original model - self.output_is_wte = False - if 'mup_embeddings_scale' in self.hparams: - self.output_is_wte = True # Hack (?) - self.embeddings_scale = self.hparams['mup_embeddings_scale'] - elif 'embeddings_scale' in self.hparams: - self.embeddings_scale = self.hparams['embeddings_scale'] - else: - assert False - - self.width_scale = 1.0 - if 'mup_output_alpha' in self.hparams: - assert 'mup_width_scale' in self.hparams - self.width_scale = self.hparams['mup_output_alpha'] * self.hparams['mup_width_scale'] - elif 'width_scale' in self.hparams: - self.width_scale = self.hparams['width_scale'] - else: - assert False - - self.max_alibi_bias = 8.0 - - def set_vocab(self): - self._set_vocab_gpt2() - - def set_gguf_parameters(self): - self.gguf_writer.add_block_count(self.hparams["n_layer"]) - self.gguf_writer.add_context_length(self.hparams["n_positions"]) - self.gguf_writer.add_embedding_length(self.hparams["n_embd"]) - self.gguf_writer.add_feed_forward_length(self.hparams["n_inner"]) - self.gguf_writer.add_head_count(self.hparams["n_head"]) - self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - tensors: list[tuple[str, Tensor]] = [] - - # we don't need these - if name.endswith((".attn.bias")): - return tensors - - if name.endswith(("relative_pe.slopes")): - # Calculate max ALiBi bias (this is the inverse of the ALiBi calculation) - # Some other models has max_alibi_bias spelled out explicitly in the hyperparams, - # but Jais's PyTorch model simply precalculates the slope values and places them - # in relative_pes.slopes - n_head_closest_log2 = 2 ** math.floor(math.log2(self.hparams["n_head"])) - first_val = float(data_torch[0].item()) - self.max_alibi_bias = -round(math.log2(first_val) * n_head_closest_log2) - - return tensors - - if name.endswith((".c_attn.weight", ".c_proj.weight", ".c_fc.weight", ".c_fc2.weight")): - data_torch = data_torch.transpose(1, 0) - - new_name = self.map_tensor_name(name) - - if new_name == self.format_tensor_name(gguf.MODEL_TENSOR.TOKEN_EMBD): - tensors.append((new_name, data_torch * self.embeddings_scale)) - if self.output_is_wte: - tensors.append((self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT), data_torch * self.width_scale)) - elif new_name == self.format_tensor_name(gguf.MODEL_TENSOR.OUTPUT): - assert not self.output_is_wte - tensors.append((new_name, data_torch * self.width_scale)) - else: - tensors.append((new_name, data_torch)) - - return tensors - - def prepare_tensors(self): - super().prepare_tensors() - 
self.gguf_writer.add_max_alibi_bias(self.max_alibi_bias) - - -@Model.register("ChatGLMModel", "ChatGLMForConditionalGeneration") -class ChatGLMModel(Model): - model_arch = gguf.MODEL_ARCH.CHATGLM - - def set_vocab_chatglm3(self): - dir_model = self.dir_model - hparams = self.hparams - tokens: list[bytes] = [] - toktypes: list[int] = [] - scores: list[float] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True) - vocab_size = hparams.get("padded_vocab_size", len(tokenizer.get_vocab())) - assert max(tokenizer.get_vocab().values()) < vocab_size - role_special_tokens = ["<|system|>", "<|user|>", "<|assistant|>", "<|observation|>"] - special_tokens = ["[MASK]", "[gMASK]", "[sMASK]", "sop", "eop"] + role_special_tokens - for token_id in range(vocab_size): - piece = tokenizer._convert_id_to_token(token_id) - if token_id == 0: - piece = "" - elif token_id == 1: - piece = "" - elif token_id == 2: - piece = "" - - text = piece.encode("utf-8") - score = 0.0 - # Referencing the tokenizer Python implementation(https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py), - # it is only valid if it is less than tokenizer.tokenizer.sp_model.vocab_size() - if len(piece) != 0 and token_id < tokenizer.tokenizer.sp_model.vocab_size(): - score = tokenizer.tokenizer.sp_model.get_score(token_id) - - if token_id >= tokenizer.tokenizer.sp_model.vocab_size(): - if piece in special_tokens: - toktype = SentencePieceTokenTypes.CONTROL - elif len(piece) == 0: - text = f"[PAD{token_id}]".encode("utf-8") - toktype = SentencePieceTokenTypes.UNUSED - else: - toktype = SentencePieceTokenTypes.USER_DEFINED - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - continue - - toktype = SentencePieceTokenTypes.NORMAL - if tokenizer.tokenizer.sp_model.is_unknown(token_id): - toktype = SentencePieceTokenTypes.UNKNOWN - elif tokenizer.tokenizer.sp_model.is_control(token_id): - toktype = SentencePieceTokenTypes.CONTROL - elif tokenizer.tokenizer.sp_model.is_unused(token_id): - toktype = SentencePieceTokenTypes.UNUSED - elif tokenizer.tokenizer.sp_model.is_byte(token_id): - toktype = SentencePieceTokenTypes.BYTE - - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - - self.gguf_writer.add_tokenizer_model("llama") - # glm3 needs prefix and suffix formatted as: - # prompt = "[gMASK]sop<|user|>\n" + prompt + "<|assistant|>" - self.gguf_writer.add_tokenizer_pre("chatglm-spm") - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_scores(scores) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(self.dir_model, n_vocab=len(tokens)) - special_vocab.add_to_gguf(self.gguf_writer) - - @staticmethod - def token_bytes_to_string(b): - from transformers.models.gpt2.tokenization_gpt2 import bytes_to_unicode - byte_encoder = bytes_to_unicode() - return ''.join([byte_encoder[ord(char)] for char in b.decode('latin-1')]) - - @staticmethod - def bpe(mergeable_ranks: dict[bytes, int], token: bytes, max_rank: int | None = None) -> list[bytes]: - parts = [bytes([b]) for b in token] - while True: - min_idx = None - min_rank = None - for i, pair in enumerate(zip(parts[:-1], parts[1:])): - rank = mergeable_ranks.get(pair[0] + pair[1]) - if rank is not None and (min_rank is None or rank < min_rank): - min_idx = i - min_rank = rank - if min_rank is None or (max_rank is not None and min_rank >= max_rank): - break - assert min_idx is not None - parts = parts[:min_idx] + [parts[min_idx] + 
parts[min_idx + 1]] + parts[min_idx + 2:] - return parts - - def set_vocab(self): - if "THUDM/chatglm3-6b" in self.hparams.get("_name_or_path", ""): - self.set_vocab_chatglm3() - return - - dir_model = self.dir_model - hparams = self.hparams - tokens: list[str] = [] - toktypes: list[int] = [] - - from transformers import AutoTokenizer - tokenizer = AutoTokenizer.from_pretrained(dir_model, trust_remote_code=True) - vocab_size = hparams["padded_vocab_size"] - assert max(tokenizer.get_vocab().values()) < vocab_size - - tokpre = self.get_vocab_base_pre(tokenizer) - - merges = [] - vocab = {} - mergeable_ranks = tokenizer.mergeable_ranks - for token, rank in mergeable_ranks.items(): - vocab[ChatGLMModel.token_bytes_to_string(token)] = rank - if len(token) == 1: - continue - merged = ChatGLMModel.bpe(mergeable_ranks, token, max_rank=rank) - assert len(merged) >= 2 and len(merged) <= 7 - merges.append(' '.join(map(ChatGLMModel.token_bytes_to_string, merged))) - - # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined - added_vocab = tokenizer.get_added_vocab() - reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()} - - for i in range(vocab_size): - if i not in reverse_vocab: - tokens.append(f"[PAD{i}]") - toktypes.append(gguf.TokenType.UNUSED) - elif reverse_vocab[i] in added_vocab: - tokens.append(reverse_vocab[i]) - if tokenizer.added_tokens_decoder[i].special: - toktypes.append(gguf.TokenType.CONTROL) - else: - toktypes.append(gguf.TokenType.USER_DEFINED) - else: - tokens.append(reverse_vocab[i]) - toktypes.append(gguf.TokenType.NORMAL) - - self.gguf_writer.add_tokenizer_model("gpt2") - self.gguf_writer.add_tokenizer_pre(tokpre) - self.gguf_writer.add_token_list(tokens) - self.gguf_writer.add_token_types(toktypes) - - special_vocab = gguf.SpecialVocab(dir_model, load_merges=False) - special_vocab.merges = merges - # only add special tokens when they were not already loaded from config.json - special_vocab._set_special_token("eos", tokenizer.get_added_vocab()["<|endoftext|>"]) - special_vocab._set_special_token("eot", tokenizer.get_added_vocab()["<|user|>"]) - # this one is usually not in config.json anyway - special_vocab._set_special_token("unk", tokenizer.get_added_vocab()["<|endoftext|>"]) - special_vocab.add_to_gguf(self.gguf_writer) - - def set_gguf_parameters(self): - n_embed = self.hparams.get("hidden_size", self.hparams.get("n_embed")) - n_head = self.hparams.get("n_head", self.hparams.get("num_attention_heads")) - n_head_kv = self.hparams.get("multi_query_group_num", n_head) - self.gguf_writer.add_context_length(self.hparams.get("seq_length", n_embed)) - self.gguf_writer.add_embedding_length(n_embed) - self.gguf_writer.add_feed_forward_length(self.hparams.get("ffn_hidden_size", 4 * n_embed)) - self.gguf_writer.add_block_count(self.hparams["num_layers"]) - self.gguf_writer.add_head_count(n_head) - self.gguf_writer.add_head_count_kv(n_head_kv) - self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layernorm_epsilon"]) - self.gguf_writer.add_file_type(self.ftype) - self.gguf_writer.add_rope_dimension_count(64) - self.gguf_writer.add_add_bos_token(False) - rope_freq = 10000 - if "rope_ratio" in self.hparams: - rope_freq = rope_freq * self.hparams["rope_ratio"] - self.gguf_writer.add_rope_freq_base(rope_freq) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - del bid # unused - - if name.endswith(".rotary_pos_emb.inv_freq"): - return [] - - name = 
name.removeprefix("transformer.") - return [(self.map_tensor_name(name), data_torch)] - -###### CONVERSION LOGIC ###### - - -# tree of lazy tensors -class LazyTorchTensor(gguf.LazyBase): - _tensor_type = torch.Tensor - # to keep the type-checker happy - dtype: torch.dtype - shape: torch.Size - - # only used when converting a torch.Tensor to a np.ndarray - _dtype_map: dict[torch.dtype, type] = { - torch.float16: np.float16, - torch.float32: np.float32, - } - - # used for safetensors slices - # ref: https://github.com/huggingface/safetensors/blob/079781fd0dc455ba0fe851e2b4507c33d0c0d407/bindings/python/src/lib.rs#L1046 - # TODO: uncomment U64, U32, and U16, ref: https://github.com/pytorch/pytorch/issues/58734 - _dtype_str_map: dict[str, torch.dtype] = { - "F64": torch.float64, - "F32": torch.float32, - "BF16": torch.bfloat16, - "F16": torch.float16, - # "U64": torch.uint64, - "I64": torch.int64, - # "U32": torch.uint32, - "I32": torch.int32, - # "U16": torch.uint16, - "I16": torch.int16, - "U8": torch.uint8, - "I8": torch.int8, - "BOOL": torch.bool, - "F8_E4M3": torch.float8_e4m3fn, - "F8_E5M2": torch.float8_e5m2, - } - - def numpy(self) -> gguf.LazyNumpyTensor: - dtype = self._dtype_map[self.dtype] - return gguf.LazyNumpyTensor( - meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape), - args=(self,), - func=(lambda s: s.numpy()) - ) - - @classmethod - def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: tuple[int, ...]) -> Tensor: - return torch.empty(size=shape, dtype=dtype, device="meta") - - @classmethod - def from_safetensors_slice(cls, st_slice: Any) -> Tensor: - dtype = cls._dtype_str_map[st_slice.get_dtype()] - shape: tuple[int, ...] = tuple(st_slice.get_shape()) - lazy = cls(meta=cls.meta_with_dtype_and_shape(dtype, shape), args=(st_slice,), func=lambda s: s[:]) - return cast(torch.Tensor, lazy) - - @classmethod - def __torch_function__(cls, func, types, args=(), kwargs=None): - del types # unused - - if kwargs is None: - kwargs = {} - - if func is torch.Tensor.numpy: - return args[0].numpy() - - return cls._wrap_fn(func)(*args, **kwargs) - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - description="Convert a huggingface model to a GGML compatible file") - parser.add_argument( - "--vocab-only", action="store_true", - help="extract only the vocab", - ) - parser.add_argument( - "--outfile", type=Path, - help="path to write to; default: based on input. 
{ftype} will be replaced by the outtype.", - ) - parser.add_argument( - "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16", - help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type", - ) - parser.add_argument( - "--bigendian", action="store_true", - help="model is executed on big endian machine", - ) - parser.add_argument( - "model", type=Path, - help="directory containing model file", - ) - parser.add_argument( - "--use-temp-file", action="store_true", - help="use the tempfile library while processing (helpful when running out of memory, process killed)", - ) - parser.add_argument( - "--no-lazy", action="store_true", - help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)", - ) - parser.add_argument( - "--model-name", type=str, default=None, - help="name of the model", - ) - parser.add_argument( - "--verbose", action="store_true", - help="increase output verbosity", - ) - parser.add_argument( - "--split-max-tensors", type=int, default=0, - help="max tensors in each split", - ) - parser.add_argument( - "--split-max-size", type=str, default="0", - help="max size per split N(M|G)", - ) - parser.add_argument( - "--dry-run", action="store_true", - help="only print out a split plan and exit, without writing any new files", - ) - parser.add_argument( - "--no-tensor-first-split", action="store_true", - help="do not add tensors to the first split (disabled by default)" - ) - parser.add_argument( - "--metadata", type=Path, - help="Specify the path for an authorship metadata override file" - ) - - return parser.parse_args() - - -def split_str_to_n_bytes(split_str: str) -> int: - if split_str.endswith("K"): - n = int(split_str[:-1]) * 1000 - elif split_str.endswith("M"): - n = int(split_str[:-1]) * 1000 * 1000 - elif split_str.endswith("G"): - n = int(split_str[:-1]) * 1000 * 1000 * 1000 - elif split_str.isnumeric(): - n = int(split_str) - else: - raise ValueError(f"Invalid split size: {split_str}, must be a number, optionally followed by K, M, or G") - - if n < 0: - raise ValueError(f"Invalid split size: {split_str}, must be positive") - - return n - - -def main() -> None: - args = parse_args() - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - else: - logging.basicConfig(level=logging.INFO) - - dir_model = args.model - - if not dir_model.is_dir(): - logger.error(f'Error: {args.model} is not a directory') - sys.exit(1) - - ftype_map: dict[str, gguf.LlamaFileType] = { - "f32": gguf.LlamaFileType.ALL_F32, - "f16": gguf.LlamaFileType.MOSTLY_F16, - "bf16": gguf.LlamaFileType.MOSTLY_BF16, - "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0, - "auto": gguf.LlamaFileType.GUESSED, - } - - is_split = args.split_max_tensors > 0 or args.split_max_size != "0" - if args.use_temp_file and is_split: - logger.error("Error: Cannot use temp file when splitting") - sys.exit(1) - - if args.outfile is not None: - fname_out = args.outfile - else: - fname_out = dir_model - - logger.info(f"Loading model: {dir_model.name}") - - hparams = Model.load_hparams(dir_model) - - with torch.inference_mode(): - output_type = ftype_map[args.outtype] - model_architecture = hparams["architectures"][0] - - try: - model_class = Model.from_model_architecture(model_architecture) - except NotImplementedError: - logger.error(f"Model {model_architecture} is not supported") - sys.exit(1) - - model_instance = 
model_class(dir_model=dir_model, ftype=output_type, fname_out=fname_out, - is_big_endian=args.bigendian, use_temp_file=args.use_temp_file, - eager=args.no_lazy, - metadata_override=args.metadata, model_name=args.model_name, - split_max_tensors=args.split_max_tensors, - split_max_size=split_str_to_n_bytes(args.split_max_size), dry_run=args.dry_run, - small_first_shard=args.no_tensor_first_split) - - if args.vocab_only: - logger.info("Exporting model vocab...") - model_instance.write_vocab() - logger.info(f"Model vocab successfully exported to {model_instance.fname_out}") - else: - logger.info("Exporting model...") - model_instance.write() - out_path = f"{model_instance.fname_out.parent}{os.sep}" if is_split else model_instance.fname_out - logger.info(f"Model successfully exported to {out_path}") - - -if __name__ == '__main__': - main() diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py deleted file mode 100755 index d5a2d925e..000000000 --- a/convert_hf_to_gguf_update.py +++ /dev/null @@ -1,351 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -# This script downloads the tokenizer models of the specified models from Huggingface and -# generates the get_vocab_base_pre() function for convert_hf_to_gguf.py -# -# This is necessary in order to analyze the type of pre-tokenizer used by the model and -# provide the necessary information to llama.cpp via the GGUF header in order to implement -# the same pre-tokenizer. -# -# ref: https://github.com/ggerganov/llama.cpp/pull/6920 -# -# Instructions: -# -# - Add a new model to the "models" list -# - Run the script with your huggingface token: -# -# python3 convert_hf_to_gguf_update.py -# -# - Copy-paste the generated get_vocab_base_pre() function into convert_hf_to_gguf.py -# - Update llama.cpp with the new pre-tokenizer if necessary -# -# TODO: generate tokenizer tests for llama.cpp -# - -import logging -import os -import pathlib -import re - -import requests -import sys -import json - -from hashlib import sha256 -from enum import IntEnum, auto -from transformers import AutoTokenizer - -logging.basicConfig(level=logging.DEBUG) -logger = logging.getLogger("convert_hf_to_gguf_update") -sess = requests.Session() - - -class TOKENIZER_TYPE(IntEnum): - SPM = auto() - BPE = auto() - WPM = auto() - UGM = auto() - - -# TODO: this string has to exercise as much pre-tokenizer functionality as possible -# will be updated with time - contributions welcome -CHK_TXT = '\n \n\n \n\n\n \t \t\t \t\n \n \n \n \n🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български \'\'\'\'\'\'```````\"\"\"\"......!!!!!!?????? I\'ve been \'told he\'s there, \'RE you sure? \'M not sure I\'ll make it, \'D you like some tea? 
We\'Ve a\'lL' - -if len(sys.argv) == 2: - token = sys.argv[1] - if not token.startswith("hf_"): - logger.info("Huggingface token seems invalid") - logger.info("Usage: python convert_hf_to_gguf_update.py ") - sys.exit(1) -else: - logger.info("Usage: python convert_hf_to_gguf_update.py ") - sys.exit(1) - -# TODO: add models here, base models preferred -models = [ - {"name": "llama-spm", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/meta-llama/Llama-2-7b-hf", }, - {"name": "llama-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/meta-llama/Meta-Llama-3-8B", }, - {"name": "phi-3", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/microsoft/Phi-3-mini-4k-instruct", }, - {"name": "deepseek-llm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-llm-7b-base", }, - {"name": "deepseek-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/deepseek-coder-6.7b-base", }, - {"name": "falcon", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/tiiuae/falcon-7b", }, - {"name": "bert-bge", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/BAAI/bge-small-en-v1.5", }, - {"name": "mpt", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mosaicml/mpt-7b", }, - {"name": "starcoder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/bigcode/starcoder2-3b", }, - {"name": "gpt-2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/openai-community/gpt2", }, - {"name": "stablelm2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/stabilityai/stablelm-2-zephyr-1_6b", }, - {"name": "refact", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/smallcloudai/Refact-1_6-base", }, - {"name": "command-r", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/CohereForAI/c4ai-command-r-v01", }, - {"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", }, - {"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", }, - {"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", }, - {"name": "jina-v2-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM! 
- {"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", }, - {"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", }, - {"name": "smaug-bpe", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/abacusai/Smaug-Llama-3-70B-Instruct", }, - {"name": "poro-chat", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Poro-34B-chat", }, - {"name": "jina-v2-code", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-code", }, - {"name": "viking", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/LumiOpen/Viking-7B", }, # Also used for Viking 13B and 33B - {"name": "gemma", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/google/gemma-2b", }, - {"name": "gemma-2", "tokt": TOKENIZER_TYPE.SPM, "repo": "https://huggingface.co/google/gemma-2-9b", }, - {"name": "jais", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/core42/jais-13b", }, - {"name": "t5", "tokt": TOKENIZER_TYPE.UGM, "repo": "https://huggingface.co/google-t5/t5-small", }, - {"name": "codeshell", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/WisdomShell/CodeShell-7B", }, - {"name": "tekken", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistralai/Mistral-Nemo-Base-2407", }, - {"name": "smollm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/HuggingFaceTB/SmolLM-135M", }, -] - - -def download_file_with_auth(url, token, save_path): - headers = {"Authorization": f"Bearer {token}"} - response = sess.get(url, headers=headers) - response.raise_for_status() - os.makedirs(os.path.dirname(save_path), exist_ok=True) - with open(save_path, 'wb') as downloaded_file: - downloaded_file.write(response.content) - logger.info(f"File {save_path} downloaded successfully") - - -def download_model(model): - name = model["name"] - repo = model["repo"] - tokt = model["tokt"] - - os.makedirs(f"models/tokenizers/{name}", exist_ok=True) - - files = ["config.json", "tokenizer.json", "tokenizer_config.json"] - - if tokt == TOKENIZER_TYPE.SPM: - files.append("tokenizer.model") - - if tokt == TOKENIZER_TYPE.UGM: - files.append("spiece.model") - - for file in files: - save_path = f"models/tokenizers/{name}/{file}" - if os.path.isfile(save_path): - logger.info(f"{name}: File {save_path} already exists - skipping") - continue - download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path) - - -for model in models: - try: - download_model(model) - except Exception as e: - logger.error(f"Failed to download model {model['name']}. Error: {e}") - - -# generate the source code for the convert_hf_to_gguf.py:get_vocab_base_pre() function: - -src_ifs = "" -for model in models: - name = model["name"] - tokt = model["tokt"] - - if tokt == TOKENIZER_TYPE.SPM or tokt == TOKENIZER_TYPE.UGM: - continue - - # Skip if the tokenizer folder does not exist or there are other download issues previously - if not os.path.exists(f"models/tokenizers/{name}"): - logger.warning(f"Directory for tokenizer {name} not found. Skipping...") - continue - - # create the tokenizer - try: - if name == "t5": - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False) - else: - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}") - except OSError as e: - logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. 
Error: {e}") - continue # Skip to the next model if the tokenizer can't be loaded - - chktok = tokenizer.encode(CHK_TXT) - chkhsh = sha256(str(chktok).encode()).hexdigest() - - logger.info(f"model: {name}") - logger.info(f"tokt: {tokt}") - logger.info(f"repo: {model['repo']}") - logger.info(f"chktok: {chktok}") - logger.info(f"chkhsh: {chkhsh}") - - # print the "pre_tokenizer" content from the tokenizer.json - with open(f"models/tokenizers/{name}/tokenizer.json", "r", encoding="utf-8") as f: - cfg = json.load(f) - normalizer = cfg["normalizer"] - logger.info("normalizer: " + json.dumps(normalizer, indent=4)) - pre_tokenizer = cfg["pre_tokenizer"] - logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4)) - if "ignore_merges" in cfg["model"]: - logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4)) - - logger.info("") - - src_ifs += f" if chkhsh == \"{chkhsh}\":\n" - src_ifs += f" # ref: {model['repo']}\n" - src_ifs += f" res = \"{name}\"\n" - -src_func = f""" - def get_vocab_base_pre(self, tokenizer) -> str: - # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that - # is specific for the BPE pre-tokenizer used by the model - # we will use this unique identifier to write a "tokenizer.ggml.pre" entry in the GGUF file which we can - # use in llama.cpp to implement the same pre-tokenizer - - chktxt = {repr(CHK_TXT)} - - chktok = tokenizer.encode(chktxt) - chkhsh = sha256(str(chktok).encode()).hexdigest() - - logger.debug(f"chktok: {{chktok}}") - logger.debug(f"chkhsh: {{chkhsh}}") - - res = None - - # NOTE: if you get an error here, you need to update the convert_hf_to_gguf_update.py script - # or pull the latest version of the model from Huggingface - # don't edit the hashes manually! 
-{src_ifs} - if res is None: - logger.warning("\\n") - logger.warning("**************************************************************************************") - logger.warning("** WARNING: The BPE pre-tokenizer was not recognized!") - logger.warning("** There are 2 possible reasons for this:") - logger.warning("** - the model has not been added to convert_hf_to_gguf_update.py yet") - logger.warning("** - the pre-tokenization config has changed upstream") - logger.warning("** Check your model files and convert_hf_to_gguf_update.py and update them accordingly.") - logger.warning("** ref: https://github.com/ggerganov/llama.cpp/pull/6920") - logger.warning("**") - logger.warning(f"** chkhsh: {{chkhsh}}") - logger.warning("**************************************************************************************") - logger.warning("\\n") - raise NotImplementedError("BPE pre-tokenizer was not recognized - update get_vocab_base_pre()") - - logger.debug(f"tokenizer.ggml.pre: {{repr(res)}}") - logger.debug(f"chkhsh: {{chkhsh}}") - - return res -""" - -convert_py_pth = pathlib.Path("convert_hf_to_gguf.py") -convert_py = convert_py_pth.read_text(encoding="utf-8") -convert_py = re.sub( - r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)", - lambda m: m.group(1) + src_func + m.group(3), - convert_py, - flags=re.DOTALL | re.MULTILINE, -) - -convert_py_pth.write_text(convert_py, encoding="utf-8") - -logger.info("+++ convert_hf_to_gguf.py was updated") - -# generate tests for each tokenizer model - -tests = [ - "ied 4 ½ months", - "Führer", - "", - " ", - " ", - " ", - "\t", - "\n", - "\n\n", - "\n\n\n", - "\t\n", - "Hello world", - " Hello world", - "Hello World", - " Hello World", - " Hello World!", - "Hello, world!", - " Hello, world!", - " this is 🦙.cpp", - "w048 7tuijk dsdfhu", - "нещо на Български", - "កាន់តែពិសេសអាចខលចេញ", - "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)", - "Hello", - " Hello", - " Hello", - " Hello", - " Hello", - " Hello\n Hello", - " (", - "\n =", - "' era", - "Hello, y'all! How are you 😁 ?我想在apple工作1314151天~", - "!!!!!!", - "3", - "33", - "333", - "3333", - "33333", - "333333", - "3333333", - "33333333", - "333333333", - "Cửa Việt", # llama-bpe fails on this - " discards", - CHK_TXT, -] - -# write the tests to ./models/ggml-vocab-{name}.gguf.inp -# the format is: -# -# test0 -# __ggml_vocab_test__ -# test1 -# __ggml_vocab_test__ -# ... -# - -# with each model, encode all tests and write the results in ./models/ggml-vocab-{name}.gguf.out -# for each test, write the resulting tokens on a separate line - -for model in models: - name = model["name"] - tokt = model["tokt"] - - # Skip if the tokenizer folder does not exist or there are other download issues previously - if not os.path.exists(f"models/tokenizers/{name}"): - logger.warning(f"Directory for tokenizer {name} not found. Skipping...") - continue - - # create the tokenizer - try: - if name == "t5": - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}", use_fast=False) - else: - tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}") - except OSError as e: - logger.error(f"Failed to load tokenizer for model {name}. 
Error: {e}") - continue # Skip this model and continue with the next one in the loop - - with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f: - for text in tests: - f.write(f"{text}") - f.write("\n__ggml_vocab_test__\n") - - with open(f"models/ggml-vocab-{name}.gguf.out", "w") as f: - for text in tests: - res = tokenizer.encode(text, add_special_tokens=False) - for r in res: - f.write(f" {r}") - f.write("\n") - - logger.info(f"Tests for {name} written in ./models/ggml-vocab-{name}.gguf.*") - -# generate commands for creating vocab files - -logger.info("\nRun the following commands to generate the vocab files for testing:\n") - -for model in models: - name = model["name"] - - print(f"python3 convert_hf_to_gguf.py models/tokenizers/{name}/ --outfile models/ggml-vocab-{name}.gguf --vocab-only") # noqa: NP100 - -logger.info("\n") diff --git a/convert_llama_ggml_to_gguf.py b/convert_llama_ggml_to_gguf.py deleted file mode 100755 index 7b00b4398..000000000 --- a/convert_llama_ggml_to_gguf.py +++ /dev/null @@ -1,450 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import annotations - -import logging -import argparse -import os -import struct -import sys -from enum import IntEnum -from pathlib import Path - -import numpy as np - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) -import gguf - -logger = logging.getLogger("ggml-to-gguf") - - -class GGMLFormat(IntEnum): - GGML = 0 - GGMF = 1 - GGJT = 2 - - -class GGMLFType(IntEnum): - ALL_F32 = 0 - MOSTLY_F16 = 1 - MOSTLY_Q4_0 = 2 - MOSTLY_Q4_1 = 3 - MOSTLY_Q4_1_SOME_F16 = 4 - MOSTLY_Q8_0 = 7 - MOSTLY_Q5_0 = 8 - MOSTLY_Q5_1 = 9 - MOSTLY_Q2_K = 10 - MOSTLY_Q3_K_S = 11 - MOSTLY_Q3_K_M = 12 - MOSTLY_Q3_K_L = 13 - MOSTLY_Q4_K_S = 14 - MOSTLY_Q4_K_M = 15 - MOSTLY_Q5_K_S = 16 - MOSTLY_Q5_K_M = 17 - MOSTLY_Q6_K = 18 - - -class Hyperparameters: - def __init__(self): - self.n_vocab = self.n_embd = self.n_mult = self.n_head = 0 - self.n_layer = self.n_rot = self.n_ff = 0 - self.ftype = GGMLFType.ALL_F32 - - def set_n_ff(self, model): - ff_tensor_idx = model.tensor_map.get(b'layers.0.feed_forward.w1.weight') - assert ff_tensor_idx is not None, 'Missing layer 0 FF tensor' - ff_tensor = model.tensors[ff_tensor_idx] - self.n_ff = ff_tensor.dims[1] - - def load(self, data, offset): - ( - self.n_vocab, - self.n_embd, - self.n_mult, - self.n_head, - self.n_layer, - self.n_rot, - ftype, - ) = struct.unpack('<7I', data[offset:offset + (4 * 7)]) - try: - self.ftype = GGMLFType(ftype) - except ValueError: - raise ValueError(f'Invalid ftype {ftype}') - return 4 * 7 - - def __str__(self): - return f'' - - -class Vocab: - def __init__(self, load_scores = True): - self.items = [] - self.load_scores = load_scores - - def load(self, data, offset, n_vocab): - orig_offset = offset - for _ in range(n_vocab): - itemlen = struct.unpack('= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}' - assert name_len < 4096, 'Absurd tensor name length' - quant = gguf.GGML_QUANT_SIZES.get(dtype) - assert quant is not None, 'Unknown tensor type' - (blksize, tysize) = quant - offset += 12 - self.dtype= dtype - self.dims = struct.unpack(f'<{n_dims}I', data[offset:offset + (4 * n_dims)]) - offset += 4 * n_dims - self.name = bytes(data[offset:offset + name_len]) - offset += name_len - pad = ((offset + 31) & ~31) - offset if self.use_padding else 0 - offset += pad - n_elems = np.prod(self.dims) - n_bytes = np.int64(np.int64(n_elems) * np.int64(tysize)) // np.int64(blksize) - self.start_offset = offset - self.len_bytes = 
n_bytes - offset += n_bytes - return offset - orig_offset - - -class GGMLModel: - - file_format: GGMLFormat - format_version: int - - def __init__(self): - self.hyperparameters = None - self.vocab = None - self.tensor_map = {} - self.tensors = [] - - def validate_header(self, data, offset): - magic = bytes(data[offset:offset + 4]) - if magic == b'GGUF': - raise ValueError('File is already in GGUF format.') - if magic == b'lmgg': - self.file_format = GGMLFormat.GGML - self.format_version = 1 - return 4 - version = struct.unpack(' 3: - raise ValueError(f'Cannot handle unexpected GGJT file version {version}') - self.file_format = GGMLFormat.GGJT - self.format_version = version - return 8 - raise ValueError(f"Unexpected file magic {magic!r}! This doesn't look like a GGML format file.") - - def validate_conversion(self, ftype): - err = '' - if (self.file_format < GGMLFormat.GGJT or self.format_version < 2): - if ftype not in (GGMLFType.ALL_F32, GGMLFType.MOSTLY_F16): - err = 'Quantizations changed in GGJTv2. Can only convert unquantized GGML files older than GGJTv2.' - elif (self.file_format == GGMLFormat.GGJT and self.format_version == 2): - if ftype in (GGMLFType.MOSTLY_Q4_0, GGMLFType.MOSTLY_Q4_1, - GGMLFType.MOSTLY_Q4_1_SOME_F16, GGMLFType.MOSTLY_Q8_0): - err = 'Q4 and Q8 quantizations changed in GGJTv3.' - if len(err) > 0: - raise ValueError(f'{err} Sorry, your {self.file_format.name}v{self.format_version} file of type {ftype.name} is not eligible for conversion.') - - def load(self, data, offset): - offset += self.validate_header(data, offset) - hp = Hyperparameters() - offset += hp.load(data, offset) - logger.info(f'* File format: {self.file_format.name}v{self.format_version} with ftype {hp.ftype.name}') - self.validate_conversion(hp.ftype) - vocab = Vocab(load_scores = self.file_format > GGMLFormat.GGML) - offset += vocab.load(data, offset, hp.n_vocab) - tensors: list[Tensor] = [] - tensor_map = {} - while offset < len(data): - tensor = Tensor(use_padding = self.file_format > GGMLFormat.GGMF) - offset += tensor.load(data, offset) - tensor_map[tensor.name] = len(tensors) - tensors.append(tensor) - self.hyperparameters = hp - self.vocab = vocab - self.tensors = tensors - self.tensor_map = tensor_map - hp.set_n_ff(self) - return offset - - -class GGMLToGGUF: - def __init__(self, ggml_model, data, cfg, params_override = None, vocab_override = None, special_vocab = None): - hp = ggml_model.hyperparameters - self.model = ggml_model - self.data = data - self.cfg = cfg - self.params_override = params_override - self.vocab_override = vocab_override - self.special_vocab = special_vocab - if params_override is not None: - n_kv_head = params_override.n_head_kv - else: - if cfg.gqa == 1: - n_kv_head = hp.n_head - else: - gqa = float(cfg.gqa) - n_kv_head = None - for x in range(1, 256): - if float(hp.n_head) / float(x) == gqa: - n_kv_head = x - assert n_kv_head is not None, "Couldn't determine n_kv_head from GQA param" - logger.info(f'- Guessed n_kv_head = {n_kv_head} based on GQA {cfg.gqa}') - self.n_kv_head = n_kv_head - self.name_map = gguf.get_tensor_name_map(gguf.MODEL_ARCH.LLAMA, ggml_model.hyperparameters.n_layer) - - def save(self): - logger.info('* Preparing to save GGUF file') - gguf_writer = gguf.GGUFWriter( - self.cfg.output, - gguf.MODEL_ARCH_NAMES[gguf.MODEL_ARCH.LLAMA], - use_temp_file = False) - self.add_params(gguf_writer) - self.add_vocab(gguf_writer) - if self.special_vocab is not None: - self.special_vocab.add_to_gguf(gguf_writer) - self.add_tensors(gguf_writer) - logger.info(" 
gguf: write header") - gguf_writer.write_header_to_file() - logger.info(" gguf: write metadata") - gguf_writer.write_kv_data_to_file() - logger.info(" gguf: write tensors") - gguf_writer.write_tensors_to_file() - gguf_writer.close() - - def add_params(self, gguf_writer): - hp = self.model.hyperparameters - cfg = self.cfg - if cfg.desc is not None: - desc = cfg.desc - else: - desc = f'converted from legacy {self.model.file_format.name}v{self.model.format_version} {hp.ftype.name} format' - try: - # Filenames aren't necessarily valid UTF8. - name = cfg.name if cfg.name is not None else cfg.input.name - except UnicodeDecodeError: - name = None - logger.info('* Adding model parameters and KV items') - if name is not None: - gguf_writer.add_name(name) - gguf_writer.add_description(desc) - gguf_writer.add_file_type(int(hp.ftype)) - if self.params_override is not None: - po = self.params_override - assert po.n_embd == hp.n_embd, 'Model hyperparams mismatch' - assert po.n_layer == hp.n_layer, 'Model hyperparams mismatch' - assert po.n_head == hp.n_head, 'Model hyperparams mismatch' - gguf_writer.add_context_length (po.n_ctx) - gguf_writer.add_embedding_length (po.n_embd) - gguf_writer.add_block_count (po.n_layer) - gguf_writer.add_feed_forward_length (po.n_ff) - gguf_writer.add_rope_dimension_count(po.n_embd // po.n_head) - gguf_writer.add_head_count (po.n_head) - gguf_writer.add_head_count_kv (po.n_head_kv) - gguf_writer.add_layer_norm_rms_eps (po.f_norm_eps) - return - gguf_writer.add_context_length(cfg.context_length) - gguf_writer.add_embedding_length(hp.n_embd) - gguf_writer.add_block_count(hp.n_layer) - gguf_writer.add_feed_forward_length(hp.n_ff) - gguf_writer.add_rope_dimension_count(hp.n_embd // hp.n_head) - gguf_writer.add_head_count(hp.n_head) - gguf_writer.add_head_count_kv(self.n_kv_head) - gguf_writer.add_layer_norm_rms_eps(float(cfg.eps)) - - def add_vocab(self, gguf_writer): - hp = self.model.hyperparameters - gguf_writer.add_tokenizer_model('llama') - gguf_writer.add_tokenizer_pre('default') - tokens = [] - scores = [] - toktypes = [] - if self.vocab_override is not None: - vo = self.vocab_override - logger.info('* Adding vocab item(s)') - for (_, (vbytes, score, ttype)) in enumerate(vo.all_tokens()): - tokens.append(vbytes) - scores.append(score) - toktypes.append(ttype) - assert len(tokens) == hp.n_vocab, \ - f'Override vocab has a different number of items than hyperparameters - override = {len(tokens)} but n_vocab={hp.n_vocab}' - gguf_writer.add_token_list(tokens) - gguf_writer.add_token_scores(scores) - if len(toktypes) > 0: - gguf_writer.add_token_types(toktypes) - return - logger.info(f'* Adding {hp.n_vocab} vocab item(s)') - assert len(self.model.vocab.items) >= 3, 'Cannot handle unexpectedly short model vocab' - for (tokid, (vbytes, vscore)) in enumerate(self.model.vocab.items): - tt = 1 # Normal - # Special handling for UNK, BOS, EOS tokens. 
- if tokid <= 2: - if tokid == 0: - vbytes = b'' - tt = 2 - elif tokid == 1: - vbytes = b'' - tt = 3 - else: - vbytes = b'' - tt = 3 - elif len(vbytes) == 0: - tt = 3 # Control - elif tokid >= 3 and tokid <= 258 and len(vbytes) == 1: - vbytes = bytes(f'<0x{vbytes[0]:02X}>', encoding = 'UTF-8') - tt = 6 # Byte - else: - vbytes = vbytes.replace(b' ', b'\xe2\x96\x81') - toktypes.append(tt) - tokens.append(vbytes) - scores.append(vscore) - gguf_writer.add_token_list(tokens) - gguf_writer.add_token_scores(scores) - gguf_writer.add_token_types(toktypes) - gguf_writer.add_unk_token_id(0) - gguf_writer.add_bos_token_id(1) - gguf_writer.add_eos_token_id(2) - - def add_tensors(self, gguf_writer): - tensor_map = self.name_map - data = self.data - logger.info(f'* Adding {len(self.model.tensors)} tensor(s)') - for tensor in self.model.tensors: - name = str(tensor.name, 'UTF-8') - mapped_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias")) - assert mapped_name is not None, f'Bad name {name}' - tempdims = list(tensor.dims[:]) - if len(tempdims) > 1: - temp = tempdims[1] - tempdims[1] = tempdims[0] - tempdims[0] = temp - gguf_writer.add_tensor( - mapped_name, - data[tensor.start_offset:tensor.start_offset + tensor.len_bytes], - raw_shape = tempdims, - raw_dtype = tensor.dtype) - - -def handle_metadata(cfg, hp): - import examples.convert_legacy_llama as convert - - assert cfg.model_metadata_dir.is_dir(), 'Metadata dir is not a directory' - hf_config_path = cfg.model_metadata_dir / "config.json" - orig_config_path = cfg.model_metadata_dir / "params.json" - # We pass a fake model here. "original" mode will check the shapes of some - # tensors if information is missing in the .json file: other than that, the - # model data isn't used so this should be safe (at least for now). 
- fakemodel = { - 'tok_embeddings.weight': convert.LazyTensor.__new__(convert.LazyTensor), - 'layers.0.feed_forward.w1.weight': convert.LazyTensor.__new__(convert.LazyTensor), - } - fakemodel['tok_embeddings.weight'].shape = [hp.n_vocab] - fakemodel['layers.0.feed_forward.w1.weight'].shape = [hp.n_ff] - if hf_config_path.exists(): - params = convert.Params.loadHFTransformerJson(fakemodel, hf_config_path) - elif orig_config_path.exists(): - params = convert.Params.loadOriginalParamsJson(fakemodel, orig_config_path) - else: - raise ValueError('Unable to load metadata') - vocab_path = Path(cfg.vocab_dir if cfg.vocab_dir is not None else cfg.model_metadata_dir) - vocab_factory = convert.VocabFactory(vocab_path) - vocab, special_vocab = vocab_factory.load_vocab(cfg.vocabtype.split(","), cfg.model_metadata_dir) - convert.check_vocab_size(params, vocab) - return params, vocab, special_vocab - - -def handle_args(): - parser = argparse.ArgumentParser(description = 'Convert GGML models to GGUF') - parser.add_argument('--input', '-i', type = Path, required = True, - help = 'Input GGMLv3 filename') - parser.add_argument('--output', '-o', type = Path, required = True, - help ='Output GGUF filename') - parser.add_argument('--name', - help = 'Set model name') - parser.add_argument('--desc', - help = 'Set model description') - parser.add_argument('--gqa', type = int, default = 1, - help = 'grouped-query attention factor (use 8 for LLaMA2 70B)') - parser.add_argument('--eps', default = '5.0e-06', - help = 'RMS norm eps: Use 1e-6 for LLaMA1 and OpenLLaMA, use 1e-5 for LLaMA2') - parser.add_argument('--context-length', '-c', type=int, default = 2048, - help = 'Default max context length: LLaMA1 is typically 2048, LLaMA2 is typically 4096') - parser.add_argument('--model-metadata-dir', '-m', type = Path, - help ='Load HuggingFace/.pth vocab and metadata from the specified directory') - parser.add_argument("--vocab-dir", type=Path, - help="directory containing tokenizer.model, if separate from model file - only meaningful with --model-metadata-dir") - parser.add_argument("--vocabtype", default="spm,hfft", - help="vocab format - only meaningful with --model-metadata-dir and/or --vocab-dir (default: spm,hfft)") - parser.add_argument("--verbose", action="store_true", help="increase output verbosity") - return parser.parse_args() - - -def main(): - cfg = handle_args() - logging.basicConfig(level=logging.DEBUG if cfg.verbose else logging.INFO) - logger.info(f'* Using config: {cfg}') - logger.warning('=== WARNING === Be aware that this conversion script is best-effort. Use a native GGUF model if possible. === WARNING ===') - if cfg.model_metadata_dir is None and (cfg.gqa == 1 or cfg.eps == '5.0e-06'): - logger.info('- Note: If converting LLaMA2, specifying "--eps 1e-5" is required. 70B models also need "--gqa 8".') - data = np.memmap(cfg.input, mode = 'r') - model = GGMLModel() - logger.info('* Scanning GGML input file') - offset = model.load(data, 0) # noqa - logger.info(f'* GGML model hyperparameters: {model.hyperparameters}') - vocab_override = None - params_override = None - special_vocab = None - if cfg.model_metadata_dir is not None: - (params_override, vocab_override, special_vocab) = handle_metadata(cfg, model.hyperparameters) - logger.info('!! 
Note: When overriding params the --gqa, --eps and --context-length options are ignored.') - logger.info(f'* Overriding params: {params_override}') - logger.info(f'* Overriding vocab: {vocab_override}') - logger.info(f'* Special vocab: {special_vocab}') - else: - logger.warning('\n=== WARNING === Special tokens may not be converted correctly. Use --model-metadata-dir if possible === WARNING ===\n') - if model.file_format == GGMLFormat.GGML: - logger.info('! This is a very old GGML file that does not contain vocab scores. Strongly recommend using model metadata!') - converter = GGMLToGGUF( - model, data, cfg, - params_override = params_override, - vocab_override = vocab_override, - special_vocab = special_vocab - ) - converter.save() - logger.info(f'* Successful completion. Output saved to: {cfg.output}') - - -if __name__ == '__main__': - main() diff --git a/convert_lora_to_gguf.py b/convert_lora_to_gguf.py deleted file mode 100755 index a88d0d4a9..000000000 --- a/convert_lora_to_gguf.py +++ /dev/null @@ -1,393 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- - -from __future__ import annotations - -from dataclasses import dataclass -import logging -import argparse -import os -import sys -import json -from math import prod -from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable, Iterable, Iterator, Sequence, SupportsIndex, cast - -import torch - -if TYPE_CHECKING: - from torch import Tensor - -if 'NO_LOCAL_GGUF' not in os.environ: - sys.path.insert(1, str(Path(__file__).parent / 'gguf-py')) -import gguf - -# reuse model definitions from convert_hf_to_gguf.py -from convert_hf_to_gguf import LazyTorchTensor, Model - -logger = logging.getLogger("lora-to-gguf") - - -@dataclass -class PartialLoraTensor: - A: Tensor | None = None - B: Tensor | None = None - - -# magic to support tensor shape modifications and splitting -class LoraTorchTensor: - _lora_A: Tensor # (n_rank, row_size) - _lora_B: Tensor # (col_size, n_rank) - _rank: int - - def __init__(self, A: Tensor, B: Tensor): - assert len(A.shape) == len(B.shape) - assert A.shape[-2] == B.shape[-1] - if A.dtype != B.dtype: - A = A.to(torch.float32) - B = B.to(torch.float32) - self._lora_A = A - self._lora_B = B - self._rank = B.shape[-1] - - def get_lora_A_B(self) -> tuple[Tensor, Tensor]: - return (self._lora_A, self._lora_B) - - def __getitem__( - self, - indices: ( - SupportsIndex - | slice - | tuple[SupportsIndex | slice | Tensor, ...] 
# TODO: add ellipsis in the type signature - ), - ) -> LoraTorchTensor: - shape = self.shape - if isinstance(indices, SupportsIndex): - if len(shape) > 2: - return LoraTorchTensor(self._lora_A[indices], self._lora_B[indices]) - else: - raise NotImplementedError # can't return a vector - elif isinstance(indices, slice): - if len(shape) > 2: - return LoraTorchTensor(self._lora_A[indices], self._lora_B[indices]) - else: - return LoraTorchTensor(self._lora_A, self._lora_B[indices]) - elif isinstance(indices, tuple): - assert len(indices) > 0 - if indices[-1] is Ellipsis: - return self[indices[:-1]] - # expand ellipsis - indices = tuple( - u - for v in ( - ( - (slice(None, None) for _ in range(len(indices) - 1)) - if i is Ellipsis - else (i,) - ) - for i in indices - ) - for u in v - ) - - if len(indices) < len(shape): - indices = (*indices, *(slice(None, None) for _ in range(len(indices), len(shape)))) - - # TODO: make sure this is correct - indices_A = ( - *( - ( - j.__index__() % self._lora_A.shape[i] - if isinstance(j, SupportsIndex) - else slice(None, None) - ) - for i, j in enumerate(indices[:-2]) - ), - slice(None, None), - indices[-1], - ) - indices_B = indices[:-1] - return LoraTorchTensor(self._lora_A[indices_A], self._lora_B[indices_B]) - else: - raise NotImplementedError # unknown indice type - - @property - def dtype(self) -> torch.dtype: - assert self._lora_A.dtype == self._lora_B.dtype - return self._lora_A.dtype - - @property - def shape(self) -> tuple[int, ...]: - assert len(self._lora_A.shape) == len(self._lora_B.shape) - return (*self._lora_B.shape[:-1], self._lora_A.shape[-1]) - - def size(self, dim=None): - assert dim is None - return self.shape - - def reshape(self, *shape: int | tuple[int, ...]) -> LoraTorchTensor: - if isinstance(shape[0], tuple): - new_shape: tuple[int, ...] 
= shape[0] - else: - new_shape = cast(tuple[int, ...], shape) - orig_shape = self.shape - if len(new_shape) < 2: - raise NotImplementedError # can't become a vector - - # expand -1 in the shape - if any(dim == -1 for dim in new_shape): - n_elems = prod(orig_shape) - n_new_elems = prod(dim if dim != -1 else 1 for dim in new_shape) - assert n_elems % n_new_elems == 0 - new_shape = (*(dim if dim != -1 else n_elems // n_new_elems for dim in new_shape),) - - if new_shape[-1] != orig_shape[-1]: - raise NotImplementedError # can't reshape the row size trivially - - shape_A = (*(1 for _ in new_shape[:-2]), self._rank, orig_shape[-1]) - shape_B = (*new_shape[:-1], self._rank) - return LoraTorchTensor( - self._lora_A.reshape(shape_A), - self._lora_B.reshape(shape_B), - ) - - def reshape_as(self, other: Tensor) -> LoraTorchTensor: - return self.reshape(*other.shape) - - def view(self, *size: int) -> LoraTorchTensor: - return self.reshape(*size) - - def permute(self, *dims: int) -> LoraTorchTensor: - shape = self.shape - dims = tuple(dim - len(shape) if dim >= 0 else dim for dim in dims) - if dims[-1] == -1: - # TODO: support higher dimensional A shapes bigger than 1 - assert all(dim == 1 for dim in self._lora_A.shape[:-2]) - return LoraTorchTensor(self._lora_A, self._lora_B.permute(*dims)) - if len(shape) == 2 and dims[-1] == -2 and dims[-2] == -1: - return LoraTorchTensor(self._lora_B.permute(*dims), self._lora_A.permute(*dims)) - else: - # TODO: compose the above two - raise NotImplementedError - - def transpose(self, dim0: int, dim1: int) -> LoraTorchTensor: - shape = self.shape - dims = [i for i in range(len(shape))] - dims[dim0], dims[dim1] = dims[dim1], dims[dim0] - return self.permute(*dims) - - def swapaxes(self, axis0: int, axis1: int) -> LoraTorchTensor: - return self.transpose(axis0, axis1) - - def to(self, *args, **kwargs): - return LoraTorchTensor(self._lora_A.to(*args, **kwargs), self._lora_B.to(*args, **kwargs)) - - @classmethod - def __torch_function__(cls, func: Callable, types, args=(), kwargs=None): - del types # unused - - if kwargs is None: - kwargs = {} - - if func is torch.permute: - return type(args[0]).permute(*args, **kwargs) - elif func is torch.reshape: - return type(args[0]).reshape(*args, **kwargs) - elif func is torch.stack: - assert isinstance(args[0], Sequence) - dim = kwargs.get("dim", 0) - assert dim == 0 - return LoraTorchTensor( - torch.stack([a._lora_A for a in args[0]], dim), - torch.stack([b._lora_B for b in args[0]], dim), - ) - elif func is torch.cat: - assert isinstance(args[0], Sequence) - dim = kwargs.get("dim", 0) - assert dim == 0 - if len(args[0][0].shape) > 2: - return LoraTorchTensor( - torch.cat([a._lora_A for a in args[0]], dim), - torch.cat([b._lora_B for b in args[0]], dim), - ) - elif all(torch.equal(args[0][0]._lora_A, t._lora_A) for t in args[0][1:]): - return LoraTorchTensor( - args[0][0]._lora_A, - torch.cat([b._lora_B for b in args[0]], dim), - ) - else: - raise NotImplementedError - else: - raise NotImplementedError - - -def get_base_tensor_name(lora_tensor_name: str) -> str: - base_name = lora_tensor_name.replace("base_model.model.", "") - base_name = base_name.replace(".lora_A.weight", ".weight") - base_name = base_name.replace(".lora_B.weight", ".weight") - return base_name - - -def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser( - description="Convert a huggingface PEFT LoRA adapter to a GGML compatible file") - parser.add_argument( - "--outfile", type=Path, - help="path to write to; default: based on input. 
{ftype} will be replaced by the outtype.", - ) - parser.add_argument( - "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16", - help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type", - ) - parser.add_argument( - "--bigendian", action="store_true", - help="model is executed on big endian machine", - ) - parser.add_argument( - "--no-lazy", action="store_true", - help="use more RAM by computing all outputs before writing (use in case lazy evaluation is broken)", - ) - parser.add_argument( - "--verbose", action="store_true", - help="increase output verbosity", - ) - parser.add_argument( - "--dry-run", action="store_true", - help="only print out what will be done, without writing any new files", - ) - parser.add_argument( - "--base", type=Path, required=True, - help="directory containing base model file", - ) - parser.add_argument( - "lora_path", type=Path, - help="directory containing LoRA adapter file", - ) - - return parser.parse_args() - - -if __name__ == '__main__': - args = parse_args() - logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO) - - ftype_map: dict[str, gguf.LlamaFileType] = { - "f32": gguf.LlamaFileType.ALL_F32, - "f16": gguf.LlamaFileType.MOSTLY_F16, - "bf16": gguf.LlamaFileType.MOSTLY_BF16, - "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0, - "auto": gguf.LlamaFileType.GUESSED, - } - - ftype = ftype_map[args.outtype] - - dir_base_model: Path = args.base - dir_lora: Path = args.lora_path - lora_config = dir_lora / "adapter_config.json" - input_model = dir_lora / "adapter_model.safetensors" - - if args.outfile is not None: - fname_out = args.outfile - else: - # output in the same directory as the model by default - fname_out = dir_lora - - if os.path.exists(input_model): - # lazy import load_file only if lora is in safetensors format. 
- from safetensors.torch import load_file - - lora_model = load_file(input_model, device="cpu") - else: - input_model = os.path.join(dir_lora, "adapter_model.bin") - lora_model = torch.load(input_model, map_location="cpu", weights_only=True) - - # load base model - logger.info(f"Loading base model: {dir_base_model.name}") - hparams = Model.load_hparams(dir_base_model) - with torch.inference_mode(): - try: - model_class = Model.from_model_architecture(hparams["architectures"][0]) - except NotImplementedError: - logger.error(f"Model {hparams['architectures'][0]} is not supported") - sys.exit(1) - - class LoraModel(model_class): - model_arch = model_class.model_arch - - lora_alpha: float - - def __init__(self, *args, dir_lora_model: Path, lora_alpha: float, **kwargs): - - super().__init__(*args, **kwargs) - - self.dir_model_card = dir_lora_model - self.lora_alpha = float(lora_alpha) - - def set_type(self): - self.gguf_writer.add_type(gguf.GGUFType.ADAPTER) - self.gguf_writer.add_string(gguf.Keys.Adapter.TYPE, "lora") - - def set_gguf_parameters(self): - self.gguf_writer.add_float32(gguf.Keys.Adapter.LORA_ALPHA, self.lora_alpha) - super().set_gguf_parameters() - - def get_tensors(self) -> Iterator[tuple[str, Tensor]]: - tensor_map: dict[str, PartialLoraTensor] = {} - - for name, tensor in lora_model.items(): - if self.lazy: - tensor = LazyTorchTensor.from_eager(tensor) - base_name = get_base_tensor_name(name) - is_lora_a = ".lora_A.weight" in name - is_lora_b = ".lora_B.weight" in name - if not is_lora_a and not is_lora_b: - if ".base_layer.weight" in name: - continue - logger.error(f"Unexpected name '{name}': Not a lora_A or lora_B tensor") - sys.exit(1) - - if base_name in tensor_map: - if is_lora_a: - tensor_map[base_name].A = tensor - else: - tensor_map[base_name].B = tensor - else: - if is_lora_a: - tensor_map[base_name] = PartialLoraTensor(A=tensor) - else: - tensor_map[base_name] = PartialLoraTensor(B=tensor) - - for name, tensor in tensor_map.items(): - assert tensor.A is not None - assert tensor.B is not None - yield (name, cast(torch.Tensor, LoraTorchTensor(tensor.A, tensor.B))) - - def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: - dest = super().modify_tensors(data_torch, name, bid) - for dest_name, dest_data in dest: - assert isinstance(dest_data, LoraTorchTensor) - lora_a, lora_b = dest_data.get_lora_A_B() - - yield (dest_name + ".lora_a", lora_a) - yield (dest_name + ".lora_b", lora_b) - - with open(lora_config, "r") as f: - lparams: dict[str, Any] = json.load(f) - - alpha: float = lparams["lora_alpha"] - - model_instance = LoraModel( - dir_base_model, - ftype, - fname_out, - is_big_endian=args.bigendian, - use_temp_file=False, - eager=args.no_lazy, - dry_run=args.dry_run, - dir_lora_model=dir_lora, - lora_alpha=alpha, - ) - - logger.info("Exporting model...") - model_instance.write() - logger.info(f"Model successfully exported to {model_instance.fname_out}") diff --git a/examples/CMakeLists.txt b/core/CMakeLists.txt similarity index 82% rename from examples/CMakeLists.txt rename to core/CMakeLists.txt index 1e10862b2..3c298a0df 100644 --- a/examples/CMakeLists.txt +++ b/core/CMakeLists.txt @@ -6,7 +6,7 @@ find_package(Threads REQUIRED) # ... 
-# examples +# core include_directories(${CMAKE_CURRENT_SOURCE_DIR}) @@ -16,6 +16,4 @@ else() if (GGML_RPC) add_subdirectory(rpc) endif() - if (LLAMA_BUILD_SERVER) - endif() endif() diff --git a/examples/deprecation-warning/README.md b/core/deprecation-warning/README.md similarity index 100% rename from examples/deprecation-warning/README.md rename to core/deprecation-warning/README.md diff --git a/examples/deprecation-warning/deprecation-warning.cpp b/core/deprecation-warning/deprecation-warning.cpp similarity index 100% rename from examples/deprecation-warning/deprecation-warning.cpp rename to core/deprecation-warning/deprecation-warning.cpp diff --git a/examples/main-cmake-pkg/.gitignore b/core/main-cmake-pkg/.gitignore similarity index 100% rename from examples/main-cmake-pkg/.gitignore rename to core/main-cmake-pkg/.gitignore diff --git a/examples/main-cmake-pkg/CMakeLists.txt b/core/main-cmake-pkg/CMakeLists.txt similarity index 100% rename from examples/main-cmake-pkg/CMakeLists.txt rename to core/main-cmake-pkg/CMakeLists.txt diff --git a/examples/main-cmake-pkg/README.md b/core/main-cmake-pkg/README.md similarity index 100% rename from examples/main-cmake-pkg/README.md rename to core/main-cmake-pkg/README.md diff --git a/examples/main/CMakeLists.txt b/core/main/CMakeLists.txt similarity index 100% rename from examples/main/CMakeLists.txt rename to core/main/CMakeLists.txt diff --git a/examples/main/README.md b/core/main/README.md similarity index 100% rename from examples/main/README.md rename to core/main/README.md diff --git a/examples/main/main.cpp b/core/main/main.cpp similarity index 99% rename from examples/main/main.cpp rename to core/main/main.cpp index 08f0b39e4..8b1732a09 100644 --- a/examples/main/main.cpp +++ b/core/main/main.cpp @@ -105,7 +105,7 @@ static void sigint_handler(int signo) { } else { console::cleanup(); printf("\n"); - llama_print_timings(*g_ctx); + antigma_print_timings(*g_ctx); write_logfile(*g_ctx, *g_params, *g_model, *g_input_tokens, g_output_ss->str(), *g_output_tokens); _exit(130); } @@ -992,7 +992,7 @@ int main(int argc, char ** argv) { llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size()); } - llama_print_timings(ctx); + antigma_print_timings(ctx); write_logfile(ctx, params, model, input_tokens, output_ss.str(), output_tokens); if (ctx_guidance) { llama_free(ctx_guidance); } diff --git a/examples/rpc/CMakeLists.txt b/core/rpc/CMakeLists.txt similarity index 100% rename from examples/rpc/CMakeLists.txt rename to core/rpc/CMakeLists.txt diff --git a/examples/rpc/README.md b/core/rpc/README.md similarity index 100% rename from examples/rpc/README.md rename to core/rpc/README.md diff --git a/examples/rpc/rpc-server.cpp b/core/rpc/rpc-server.cpp similarity index 100% rename from examples/rpc/rpc-server.cpp rename to core/rpc/rpc-server.cpp diff --git a/examples/sycl/CMakeLists.txt b/core/sycl/CMakeLists.txt similarity index 100% rename from examples/sycl/CMakeLists.txt rename to core/sycl/CMakeLists.txt diff --git a/examples/sycl/README.md b/core/sycl/README.md similarity index 100% rename from examples/sycl/README.md rename to core/sycl/README.md diff --git a/examples/sycl/build.sh b/core/sycl/build.sh similarity index 100% rename from examples/sycl/build.sh rename to core/sycl/build.sh diff --git a/examples/sycl/ls-sycl-device.cpp b/core/sycl/ls-sycl-device.cpp similarity index 100% rename from examples/sycl/ls-sycl-device.cpp rename to core/sycl/ls-sycl-device.cpp diff --git a/examples/sycl/run-llama2.sh 
b/core/sycl/run-llama2.sh similarity index 100% rename from examples/sycl/run-llama2.sh rename to core/sycl/run-llama2.sh diff --git a/examples/sycl/win-build-sycl.bat b/core/sycl/win-build-sycl.bat similarity index 100% rename from examples/sycl/win-build-sycl.bat rename to core/sycl/win-build-sycl.bat diff --git a/examples/sycl/win-run-llama2.bat b/core/sycl/win-run-llama2.bat similarity index 100% rename from examples/sycl/win-run-llama2.bat rename to core/sycl/win-run-llama2.bat diff --git a/docs/android.md b/docs/android.md deleted file mode 100644 index cec4358d9..000000000 --- a/docs/android.md +++ /dev/null @@ -1,56 +0,0 @@ - -# Android - -## Build on Android using Termux -[Termux](https://github.com/termux/termux-app#installation) is a method to execute `llama.cpp` on an Android device (no root required). -``` -apt update && apt upgrade -y -apt install git make cmake -``` - -It's recommended to move your model inside the `~/` directory for best performance: -``` -cd storage/downloads -mv model.gguf ~/ -``` - -[Get the code](https://github.com/ggerganov/llama.cpp#get-the-code) & [follow the Linux build instructions](https://github.com/ggerganov/llama.cpp#build) to build `llama.cpp`. - -## Building the Project using Android NDK -Obtain the [Android NDK](https://developer.android.com/ndk) and then build with CMake. - -Execute the following commands on your computer to avoid downloading the NDK to your mobile. Alternatively, you can also do this in Termux: -``` -$ mkdir build-android -$ cd build-android -$ export NDK= -$ cmake -DCMAKE_TOOLCHAIN_FILE=$NDK/build/cmake/android.toolchain.cmake -DANDROID_ABI=arm64-v8a -DANDROID_PLATFORM=android-23 -DCMAKE_C_FLAGS=-march=armv8.4a+dotprod .. -$ make -``` - -Install [termux](https://github.com/termux/termux-app#installation) on your device and run `termux-setup-storage` to get access to your SD card (if Android 11+ then run the command twice). - -Finally, copy these built `llama` binaries and the model file to your device storage. Because the file permissions in the Android sdcard cannot be changed, you can copy the executable files to the `/data/data/com.termux/files/home/bin` path, and then execute the following commands in Termux to add executable permission: - -(Assumed that you have pushed the built executable files to the /sdcard/llama.cpp/bin path using `adb push`) -``` -$cp -r /sdcard/llama.cpp/bin /data/data/com.termux/files/home/ -$cd /data/data/com.termux/files/home/bin -$chmod +x ./* -``` - -Download model [llama-2-7b-chat.Q4_K_M.gguf](https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF/blob/main/llama-2-7b-chat.Q4_K_M.gguf), and push it to `/sdcard/llama.cpp/`, then move it to `/data/data/com.termux/files/home/model/` - -``` -$mv /sdcard/llama.cpp/llama-2-7b-chat.Q4_K_M.gguf /data/data/com.termux/files/home/model/ -``` - -Now, you can start chatting: -``` -$cd /data/data/com.termux/files/home/bin -$./llama-cli -m ../model/llama-2-7b-chat.Q4_K_M.gguf -n 128 -cml -``` - -Here's a demo of an interactive session running on Pixel 5 phone: - -https://user-images.githubusercontent.com/271616/225014776-1d567049-ad71-4ef2-b050-55b0b3b9274c.mp4 diff --git a/docs/backend/BLIS.md b/docs/backend/BLIS.md deleted file mode 100644 index 35d06bd0f..000000000 --- a/docs/backend/BLIS.md +++ /dev/null @@ -1,67 +0,0 @@ -BLIS Installation Manual ------------------------- - -BLIS is a portable software framework for high-performance BLAS-like dense linear algebra libraries. It has received awards and recognition, including the 2023 James H. 
Wilkinson Prize for Numerical Software and the 2020 SIAM Activity Group on Supercomputing Best Paper Prize. BLIS provides a new BLAS-like API and a compatibility layer for traditional BLAS routine calls. It offers features such as object-based API, typed API, BLAS and CBLAS compatibility layers. - -Project URL: https://github.com/flame/blis - -### Prepare: - -Compile BLIS: - -```bash -git clone https://github.com/flame/blis -cd blis -./configure --enable-cblas -t openmp,pthreads auto -# will install to /usr/local/ by default. -make -j -``` - -Install BLIS: - -```bash -sudo make install -``` - -We recommend using openmp since it's easier to modify the cores being used. - -### llama.cpp compilation - -Makefile: - -```bash -make GGML_BLIS=1 -j -# make GGML_BLIS=1 llama-benchmark-matmult -``` - -CMake: - -```bash -mkdir build -cd build -cmake -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=FLAME .. -make -j -``` - -### llama.cpp execution - -According to the BLIS documentation, we could set the following -environment variables to modify the behavior of openmp: - -```bash -export GOMP_CPU_AFFINITY="0-19" -export BLIS_NUM_THREADS=14 -``` - -And then run the binaries as normal. - - -### Intel specific issue - -Some might get the error message saying that `libimf.so` cannot be found. -Please follow this [stackoverflow page](https://stackoverflow.com/questions/70687930/intel-oneapi-2022-libimf-so-no-such-file-or-directory-during-openmpi-compila). - -### Reference: - -1. https://github.com/flame/blis#getting-started -2. https://github.com/flame/blis/blob/master/docs/Multithreading.md diff --git a/docs/backend/SYCL.md b/docs/backend/SYCL.md deleted file mode 100644 index d36ac0a15..000000000 --- a/docs/backend/SYCL.md +++ /dev/null @@ -1,580 +0,0 @@ -# llama.cpp for SYCL - -- [Background](#background) -- [Recommended Release](#recommended-release) -- [News](#news) -- [OS](#os) -- [Hardware](#hardware) -- [Docker](#docker) -- [Linux](#linux) -- [Windows](#windows) -- [Environment Variable](#environment-variable) -- [Known Issue](#known-issues) -- [Q&A](#qa) -- [TODO](#todo) - -## Background - -**SYCL** is a high-level parallel programming model designed to improve developers productivity writing code across various hardware accelerators such as CPUs, GPUs, and FPGAs. It is a single-source language designed for heterogeneous computing and based on standard C++17. - -**oneAPI** is an open ecosystem and a standard-based specification, supporting multiple architectures including but not limited to intel CPUs, GPUs and FPGAs. The key components of the oneAPI ecosystem include: - -- **DPCPP** *(Data Parallel C++)*: The primary oneAPI SYCL implementation, which includes the icpx/icx Compilers. -- **oneAPI Libraries**: A set of highly optimized libraries targeting multiple domains *(e.g. oneMKL - Math Kernel Library)*. -- **oneAPI LevelZero**: A high performance low level interface for fine-grained control over intel iGPUs and dGPUs. -- **Nvidia & AMD Plugins**: These are plugins extending oneAPI's DPCPP support to SYCL on Nvidia and AMD GPU targets. - -### Llama.cpp + SYCL - -The llama.cpp SYCL backend is designed to support **Intel GPU** firstly. Based on the cross-platform feature of SYCL, it could support other vendor GPUs: Nvidia GPU (*AMD GPU coming*). - -When targeting **Intel CPU**, it is recommended to use llama.cpp for [Intel oneMKL](README.md#intel-onemkl) backend. - -It has the similar design of other llama.cpp BLAS-based paths such as *OpenBLAS, cuBLAS, etc..*. 
In beginning work, the oneAPI's [SYCLomatic](https://github.com/oneapi-src/SYCLomatic) open-source migration tool (Commercial release [Intel® DPC++ Compatibility Tool](https://www.intel.com/content/www/us/en/developer/tools/oneapi/dpc-compatibility-tool.html)) was used for this purpose. - -## Recommended Release - -The SYCL backend would be broken by some PRs due to no online CI. - -The following release is verified with good quality: - -|Commit ID|Tag|Release|Verified Platform| -|-|-|-|-| -|fb76ec31a9914b7761c1727303ab30380fd4f05c|b3038 |[llama-b3038-bin-win-sycl-x64.zip](https://github.com/ggerganov/llama.cpp/releases/download/b3038/llama-b3038-bin-win-sycl-x64.zip) |Arc770/Linux/oneAPI 2024.1
MTL Arc GPU/Windows 11/oneAPI 2024.1| - - -## News - -- 2024.5 - - Performance is increased: 34 -> 37 tokens/s of llama-2-7b.Q4_0 on Arc770. - - Arch Linux is verified successfully. - -- 2024.4 - - Support data types: GGML_TYPE_IQ4_NL, GGML_TYPE_IQ4_XS, GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ3_S, GGML_TYPE_IQ2_XXS, GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M. - -- 2024.3 - - Release binary files of Windows. - - A blog is published: **Run LLM on all Intel GPUs Using llama.cpp**: [intel.com](https://www.intel.com/content/www/us/en/developer/articles/technical/run-llm-on-all-gpus-using-llama-cpp-artical.html) or [medium.com](https://medium.com/@jianyu_neo/run-llm-on-all-intel-gpus-using-llama-cpp-fd2e2dcbd9bd). - - New base line is ready: [tag b2437](https://github.com/ggerganov/llama.cpp/tree/b2437). - - Support multiple cards: **--split-mode**: [none|layer]; not support [row], it's on developing. - - Support to assign main GPU by **--main-gpu**, replace $GGML_SYCL_DEVICE. - - Support detecting all GPUs with level-zero and same top **Max compute units**. - - Support OPs - - hardsigmoid - - hardswish - - pool2d - -- 2024.1 - - Create SYCL backend for Intel GPU. - - Support Windows build - -## OS - -| OS | Status | Verified | -|---------|---------|------------------------------------------------| -| Linux | Support | Ubuntu 22.04, Fedora Silverblue 39, Arch Linux | -| Windows | Support | Windows 11 | - - -## Hardware - -### Intel GPU - -**Verified devices** - -| Intel GPU | Status | Verified Model | -|-------------------------------|---------|---------------------------------------| -| Intel Data Center Max Series | Support | Max 1550, 1100 | -| Intel Data Center Flex Series | Support | Flex 170 | -| Intel Arc Series | Support | Arc 770, 730M, Arc A750 | -| Intel built-in Arc GPU | Support | built-in Arc GPU in Meteor Lake | -| Intel iGPU | Support | iGPU in i5-1250P, i7-1260P, i7-1165G7 | - -*Notes:* - -- **Memory** - - The device memory is a limitation when running a large model. The loaded model size, *`llm_load_tensors: buffer_size`*, is displayed in the log when running `./bin/llama-cli`. - - - Please make sure the GPU shared memory from the host is large enough to account for the model's size. For e.g. the *llama-2-7b.Q4_0* requires at least 8.0GB for integrated GPU and 4.0GB for discrete GPU. - -- **Execution Unit (EU)** - - If the iGPU has less than 80 EUs, the inference speed will likely be too slow for practical use. - -### Other Vendor GPU - -**Verified devices** - -| Nvidia GPU | Status | Verified Model | -|--------------------------|---------|----------------| -| Ampere Series | Support | A100, A4000 | -| Ampere Series *(Mobile)* | Support | RTX 40 Series | - -## Docker -The docker build option is currently limited to *intel GPU* targets. - -### Build image -```sh -# Using FP16 -docker build -t llama-cpp-sycl --build-arg="GGML_SYCL_F16=ON" -f .devops/llama-cli-intel.Dockerfile . -``` - -*Notes*: - -To build in default FP32 *(Slower than FP16 alternative)*, you can remove the `--build-arg="GGML_SYCL_F16=ON"` argument from the previous command. - -You can also use the `.devops/llama-server-intel.Dockerfile`, which builds the *"server"* alternative. - -### Run container - -```sh -# First, find all the DRI cards -ls -la /dev/dri -# Then, pick the card that you want to use (here for e.g. /dev/dri/card1). 
-docker run -it --rm -v "$(pwd):/app:Z" --device /dev/dri/renderD128:/dev/dri/renderD128 --device /dev/dri/card1:/dev/dri/card1 llama-cpp-sycl -m "/app/models/YOUR_MODEL_FILE" -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -``` - -*Notes:* -- Docker has been tested successfully on native Linux. WSL support has not been verified yet. -- You may need to install Intel GPU driver on the **host** machine *(Please refer to the [Linux configuration](#linux) for details)*. - -## Linux - -### I. Setup Environment - -1. **Install GPU drivers** - - - **Intel GPU** - -Intel data center GPUs drivers installation guide and download page can be found here: [Get intel dGPU Drivers](https://dgpu-docs.intel.com/driver/installation.html#ubuntu-install-steps). - -*Note*: for client GPUs *(iGPU & Arc A-Series)*, please refer to the [client iGPU driver installation](https://dgpu-docs.intel.com/driver/client/overview.html). - -Once installed, add the user(s) to the `video` and `render` groups. - -```sh -sudo usermod -aG render $USER -sudo usermod -aG video $USER -``` - -*Note*: logout/re-login for the changes to take effect. - -Verify installation through `clinfo`: - -```sh -sudo apt install clinfo -sudo clinfo -l -``` - -Sample output: - -```sh -Platform #0: Intel(R) OpenCL Graphics - `-- Device #0: Intel(R) Arc(TM) A770 Graphics - -Platform #0: Intel(R) OpenCL HD Graphics - `-- Device #0: Intel(R) Iris(R) Xe Graphics [0x9a49] -``` - -- **Nvidia GPU** - -In order to target Nvidia GPUs through SYCL, please make sure the CUDA/CUBLAS native requirements *-found [here](README.md#cuda)-* are installed. - -2. **Install Intel® oneAPI Base toolkit** - -- **For Intel GPU** - -The base toolkit can be obtained from the official [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) page. - -Please follow the instructions for downloading and installing the Toolkit for Linux, and preferably keep the default installation values unchanged, notably the installation path *(`/opt/intel/oneapi` by default)*. - -Following guidelines/code snippets assume the default installation values. Otherwise, please make sure the necessary changes are reflected where applicable. - -Upon a successful installation, SYCL is enabled for the available intel devices, along with relevant libraries such as oneAPI MKL for intel GPUs. - -- **Adding support to Nvidia GPUs** - -**oneAPI Plugin**: In order to enable SYCL support on Nvidia GPUs, please install the [Codeplay oneAPI Plugin for Nvidia GPUs](https://developer.codeplay.com/products/oneapi/nvidia/download). User should also make sure the plugin version matches the installed base toolkit one *(previous step)* for a seamless "oneAPI on Nvidia GPU" setup. - - -**oneMKL for cuBlas**: The current oneMKL releases *(shipped with the oneAPI base-toolkit)* do not contain the cuBLAS backend. A build from source of the upstream [oneMKL](https://github.com/oneapi-src/oneMKL) with the *cuBLAS* backend enabled is thus required to run it on Nvidia GPUs. - -```sh -git clone https://github.com/oneapi-src/oneMKL -cd oneMKL -cmake -B buildWithCublas -DCMAKE_CXX_COMPILER=icpx -DCMAKE_C_COMPILER=icx -DENABLE_MKLGPU_BACKEND=OFF -DENABLE_MKLCPU_BACKEND=OFF -DENABLE_CUBLAS_BACKEND=ON -DTARGET_DOMAINS=blas -cmake --build buildWithCublas --config Release -``` - - -3. **Verify installation and environment** - -In order to check the available SYCL devices on the machine, please use the `sycl-ls` command. 
-```sh -source /opt/intel/oneapi/setvars.sh -sycl-ls -``` - -- **Intel GPU** - -When targeting an intel GPU, the user should expect one or more level-zero devices among the available SYCL devices. Please make sure that at least one GPU is present, for instance [`ext_oneapi_level_zero:gpu:0`] in the sample output below: - -``` -[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000] -[opencl:cpu:1] Intel(R) OpenCL, 13th Gen Intel(R) Core(TM) i7-13700K OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000] -[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Arc(TM) A770 Graphics OpenCL 3.0 NEO [23.30.26918.50] -[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Arc(TM) A770 Graphics 1.3 [1.3.26918] -``` - -- **Nvidia GPU** - -Similarly, user targeting Nvidia GPUs should expect at least one SYCL-CUDA device [`ext_oneapi_cuda:gpu`] as bellow: -``` -[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.12.0.12_195853.xmain-hotfix] -[opencl:cpu:1] Intel(R) OpenCL, Intel(R) Xeon(R) Gold 6326 CPU @ 2.90GHz OpenCL 3.0 (Build 0) [2023.16.12.0.12_195853.xmain-hotfix] -[ext_oneapi_cuda:gpu:0] NVIDIA CUDA BACKEND, NVIDIA A100-PCIE-40GB 8.0 [CUDA 12.2] -``` - -### II. Build llama.cpp - -#### Intel GPU -```sh -# Export relevant ENV variables -source /opt/intel/oneapi/setvars.sh - -# Build LLAMA with MKL BLAS acceleration for intel GPU - -# Option 1: Use FP32 (recommended for better performance in most cases) -cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx - -# Option 2: Use FP16 -cmake -B build -DGGML_SYCL=ON -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON - -# build all binary -cmake --build build --config Release -j -v -``` - -#### Nvidia GPU -```sh -# Export relevant ENV variables -export LD_LIBRARY_PATH=/path/to/oneMKL/buildWithCublas/lib:$LD_LIBRARY_PATH -export LIBRARY_PATH=/path/to/oneMKL/buildWithCublas/lib:$LIBRARY_PATH -export CPLUS_INCLUDE_DIR=/path/to/oneMKL/buildWithCublas/include:$CPLUS_INCLUDE_DIR -export CPLUS_INCLUDE_DIR=/path/to/oneMKL/include:$CPLUS_INCLUDE_DIR - -# Build LLAMA with Nvidia BLAS acceleration through SYCL - -# Option 1: Use FP32 (recommended for better performance in most cases) -cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx - -# Option 2: Use FP16 -cmake -B build -DGGML_SYCL=ON -DGGML_SYCL_TARGET=NVIDIA -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_SYCL_F16=ON - -# build all binary -cmake --build build --config Release -j -v - -``` - -### III. Run the inference - -1. Retrieve and prepare model - -You can refer to the general [*Prepare and Quantize*](README.md#prepare-and-quantize) guide for model prepration, or simply download [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) model as example. - -2. Enable oneAPI running environment - -```sh -source /opt/intel/oneapi/setvars.sh -``` - -3. List devices information - -Similar to the native `sycl-ls`, available SYCL devices can be queried as follow: - -```sh -./build/bin/llama-ls-sycl-device -``` -This command will only display the selected backend that is supported by SYCL. The default backend is level_zero. 
For example, in a system with 2 *intel GPU* it would look like the following: -``` -found 2 SYCL devices: - -| | | |Compute |Max compute|Max work|Max sub| | -|ID| Device Type| Name|capability|units |group |group |Global mem size| -|--|------------------|---------------------------------------------|----------|-----------|--------|-------|---------------| -| 0|[level_zero:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 1.3| 512| 1024| 32| 16225243136| -| 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216| -``` - - -4. Launch inference - -There are two device selection modes: - -- Single device: Use one device target specified by the user. -- Multiple devices: Automatically choose the devices with the same backend. - -In two device selection modes, the default SYCL backend is level_zero, you can choose other backend supported by SYCL by setting environment variable ONEAPI_DEVICE_SELECTOR. - -| Device selection | Parameter | -|------------------|----------------------------------------| -| Single device | --split-mode none --main-gpu DEVICE_ID | -| Multiple devices | --split-mode layer (default) | - -Examples: - -- Use device 0: - -```sh -ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm none -mg 0 -``` -or run by script: - -```sh -./examples/sycl/run_llama2.sh 0 -``` - -- Use multiple devices: - -```sh -ZES_ENABLE_SYSMAN=1 ./build/bin/llama-cli -m models/llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 400 -e -ngl 33 -sm layer -``` - -Otherwise, you can run the script: - -```sh -./examples/sycl/run_llama2.sh -``` - -*Notes:* - -- Upon execution, verify the selected device(s) ID(s) in the output log, which can for instance be displayed as follow: - -```sh -detect 1 SYCL GPUs: [0] with top Max compute units:512 -``` -Or -```sh -use 1 SYCL GPUs: [0] with Max compute units:512 -``` - -## Windows - -### I. Setup Environment - -1. Install GPU driver - -Intel GPU drivers instructions guide and download page can be found here: [Get intel GPU Drivers](https://www.intel.com/content/www/us/en/products/docs/discrete-gpus/arc/software/drivers.html). - -2. Install Visual Studio - -If you already have a recent version of Microsoft Visual Studio, you can skip this step. Otherwise, please refer to the official download page for [Microsoft Visual Studio](https://visualstudio.microsoft.com/). - -3. Install Intel® oneAPI Base toolkit - -The base toolkit can be obtained from the official [Intel® oneAPI Base Toolkit](https://www.intel.com/content/www/us/en/developer/tools/oneapi/base-toolkit.html) page. - -Please follow the instructions for downloading and installing the Toolkit for Windows, and preferably keep the default installation values unchanged, notably the installation path *(`C:\Program Files (x86)\Intel\oneAPI` by default)*. - -Following guidelines/code snippets assume the default installation values. Otherwise, please make sure the necessary changes are reflected where applicable. - -b. Enable oneAPI running environment: - -- Type "oneAPI" in the search bar, then open the `Intel oneAPI command prompt for Intel 64 for Visual Studio 2022` App. - -- On the command prompt, enable the runtime environment with the following: -``` -"C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 -``` - -c. 
Verify installation - -In the oneAPI command line, run the following to print the available SYCL devices: - -``` -sycl-ls -``` - -There should be one or more *level-zero* GPU devices displayed as **[ext_oneapi_level_zero:gpu]**. Below is example of such output detecting an *intel Iris Xe* GPU as a Level-zero SYCL device: - -Output (example): -``` -[opencl:acc:0] Intel(R) FPGA Emulation Platform for OpenCL(TM), Intel(R) FPGA Emulation Device OpenCL 1.2 [2023.16.10.0.17_160000] -[opencl:cpu:1] Intel(R) OpenCL, 11th Gen Intel(R) Core(TM) i7-1185G7 @ 3.00GHz OpenCL 3.0 (Build 0) [2023.16.10.0.17_160000] -[opencl:gpu:2] Intel(R) OpenCL Graphics, Intel(R) Iris(R) Xe Graphics OpenCL 3.0 NEO [31.0.101.5186] -[ext_oneapi_level_zero:gpu:0] Intel(R) Level-Zero, Intel(R) Iris(R) Xe Graphics 1.3 [1.3.28044] -``` - -4. Install build tools - -a. Download & install cmake for Windows: https://cmake.org/download/ (CMake can also be installed from Visual Studio Installer) -b. The new Visual Studio will install Ninja as default. (If not, please install it manually: https://ninja-build.org/) - - -### II. Build llama.cpp - -On the oneAPI command line window, step into the llama.cpp main directory and run the following: - -``` -@call "C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 --force - -# Option 1: Use FP32 (recommended for better performance in most cases) -cmake -B build -G "Ninja" -DGGML_SYCL=ON -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release - -# Option 2: Or FP16 -cmake -B build -G "Ninja" -DGGML_SYCL=ON -DCMAKE_C_COMPILER=cl -DCMAKE_CXX_COMPILER=icx -DCMAKE_BUILD_TYPE=Release -DGGML_SYCL_F16=ON - -cmake --build build --config Release -j -``` - -Otherwise, run the `win-build-sycl.bat` wrapper which encapsulates the former instructions: -```sh -.\examples\sycl\win-build-sycl.bat -``` - -Or, use CMake presets to build: -```sh -cmake --preset x64-windows-sycl-release -cmake --build build-x64-windows-sycl-release -j --target llama-cli - -cmake -DGGML_SYCL_F16=ON --preset x64-windows-sycl-release -cmake --build build-x64-windows-sycl-release -j --target llama-cli - -cmake --preset x64-windows-sycl-debug -cmake --build build-x64-windows-sycl-debug -j --target llama-cli -``` - -Or, you can use Visual Studio to open llama.cpp folder as a CMake project. Choose the sycl CMake presets (`x64-windows-sycl-release` or `x64-windows-sycl-debug`) before you compile the project. - -*Notes:* - -- In case of a minimal experimental setup, the user can build the inference executable only through `cmake --build build --config Release -j --target llama-cli`. - -### III. Run the inference - -1. Retrieve and prepare model - -You can refer to the general [*Prepare and Quantize*](README#prepare-and-quantize) guide for model prepration, or simply download [llama-2-7b.Q4_0.gguf](https://huggingface.co/TheBloke/Llama-2-7B-GGUF/blob/main/llama-2-7b.Q4_0.gguf) model as example. - -2. Enable oneAPI running environment - -On the oneAPI command line window, run the following and step into the llama.cpp directory: -``` -"C:\Program Files (x86)\Intel\oneAPI\setvars.bat" intel64 -``` - -3. List devices information - -Similar to the native `sycl-ls`, available SYCL devices can be queried as follow: - -``` -build\bin\ls-sycl-device.exe -``` - -This command will only display the selected backend that is supported by SYCL. The default backend is level_zero. 
For example, in a system with 2 *intel GPU* it would look like the following: -``` -found 2 SYCL devices: -| | | |Compute |Max compute|Max work|Max sub| | -|ID| Device Type| Name|capability|units |group |group |Global mem size| -|--|------------------|---------------------------------------------|----------|-----------|--------|-------|---------------| -| 0|[level_zero:gpu:0]| Intel(R) Arc(TM) A770 Graphics| 1.3| 512| 1024| 32| 16225243136| -| 1|[level_zero:gpu:1]| Intel(R) UHD Graphics 770| 1.3| 32| 512| 32| 53651849216| - -``` - - -4. Launch inference - -There are two device selection modes: - -- Single device: Use one device assigned by user. Default device id is 0. -- Multiple devices: Automatically choose the devices with the same backend. - -In two device selection modes, the default SYCL backend is level_zero, you can choose other backend supported by SYCL by setting environment variable ONEAPI_DEVICE_SELECTOR. - -| Device selection | Parameter | -|------------------|----------------------------------------| -| Single device | --split-mode none --main-gpu DEVICE_ID | -| Multiple devices | --split-mode layer (default) | - -Examples: - -- Use device 0: - -``` -build\bin\llama-cli.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm none -mg 0 -``` - -- Use multiple devices: - -``` -build\bin\llama-cli.exe -m models\llama-2-7b.Q4_0.gguf -p "Building a website can be done in 10 simple steps:\nStep 1:" -n 400 -e -ngl 33 -s 0 -sm layer -``` -Otherwise, run the following wrapper script: - -``` -.\examples\sycl\win-run-llama2.bat -``` - -Note: - -- Upon execution, verify the selected device(s) ID(s) in the output log, which can for instance be displayed as follow: - -```sh -detect 1 SYCL GPUs: [0] with top Max compute units:512 -``` -Or -```sh -use 1 SYCL GPUs: [0] with Max compute units:512 -``` - -## Environment Variable - -#### Build - -| Name | Value | Function | -|--------------------|-----------------------------------|---------------------------------------------| -| GGML_SYCL | ON (mandatory) | Enable build with SYCL code path. | -| GGML_SYCL_TARGET | INTEL *(default)* \| NVIDIA | Set the SYCL target device type. | -| GGML_SYCL_F16 | OFF *(default)* \|ON *(optional)* | Enable FP16 build with SYCL code path. | -| CMAKE_C_COMPILER | icx | Set *icx* compiler for SYCL code path. | -| CMAKE_CXX_COMPILER | icpx *(Linux)*, icx *(Windows)* | Set `icpx/icx` compiler for SYCL code path. | - -#### Runtime - -| Name | Value | Function | -|-------------------|------------------|---------------------------------------------------------------------------------------------------------------------------| -| GGML_SYCL_DEBUG | 0 (default) or 1 | Enable log function by macro: GGML_SYCL_DEBUG | -| ZES_ENABLE_SYSMAN | 0 (default) or 1 | Support to get free memory of GPU by sycl::aspect::ext_intel_free_memory.
Recommended to use when --split-mode = layer | - -## Known Issues - -- `Split-mode:[row]` is not supported. - -## Q&A - -- Error: `error while loading shared libraries: libsycl.so.7: cannot open shared object file: No such file or directory`. - - - Potential cause: Unavailable oneAPI installation or not set ENV variables. - - Solution: Install *oneAPI base toolkit* and enable its ENV through: `source /opt/intel/oneapi/setvars.sh`. - -- General compiler error: - - - Remove **build** folder or try a clean-build. - -- I can **not** see `[ext_oneapi_level_zero:gpu]` afer installing the GPU driver on Linux. - - Please double-check with `sudo sycl-ls`. - - If it's present in the list, please add video/render group to your user then **logout/login** or restart your system: - - ``` - sudo usermod -aG render $USER - sudo usermod -aG video $USER - ``` - Otherwise, please double-check the GPU driver installation steps. - -### **GitHub contribution**: -Please add the **[SYCL]** prefix/tag in issues/PRs titles to help the SYCL-team check/address them without delay. - -## TODO - -- Support row layer split for multiple card runs. diff --git a/docs/build.md b/docs/build.md deleted file mode 100644 index d9d12c467..000000000 --- a/docs/build.md +++ /dev/null @@ -1,340 +0,0 @@ -# Build llama.cpp locally - -**To get the Code:** - -```bash -git clone https://github.com/ggerganov/llama.cpp -cd llama.cpp -``` - -In order to build llama.cpp you have four different options. - -- Using `make`: - - On Linux or MacOS: - - ```bash - make - ``` - - - On Windows (x86/x64 only, arm64 requires cmake): - - 1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases). - 2. Extract `w64devkit` on your pc. - 3. Run `w64devkit.exe`. - 4. Use the `cd` command to reach the `llama.cpp` folder. - 5. From here you can run: - ```bash - make - ``` - - - Notes: - - For `Q4_0_4_4` quantization type build, add the `GGML_NO_LLAMAFILE=1` flag. For example, use `make GGML_NO_LLAMAFILE=1`. - - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `make -j 8` will run 8 jobs in parallel. - - For faster repeated compilation, install [ccache](https://ccache.dev/). - - For debug builds, run `make LLAMA_DEBUG=1` - -- Using `CMake`: - - ```bash - cmake -B build - cmake --build build --config Release - ``` - - **Notes**: - - - For `Q4_0_4_4` quantization type build, add the `-DGGML_LLAMAFILE=OFF` cmake option. For example, use `cmake -B build -DGGML_LLAMAFILE=OFF`. - - For faster compilation, add the `-j` argument to run multiple jobs in parallel. For example, `cmake --build build --config Release -j 8` will run 8 jobs in parallel. - - For faster repeated compilation, install [ccache](https://ccache.dev/). - - For debug builds, there are two cases: - - 1. Single-config generators (e.g. default = `Unix Makefiles`; note that they just ignore the `--config` flag): - - ```bash - cmake -B build -DCMAKE_BUILD_TYPE=Debug - cmake --build build - ``` - - 2. Multi-config generators (`-G` param set to Visual Studio, XCode...): - - ```bash - cmake -B build -G "Xcode" - cmake --build build --config Debug - ``` - - Building for Windows (x86, x64 and arm64) with MSVC or clang as compilers: - - Install Visual Studio 2022, e.g. via the [Community Edition](https://visualstudio.microsoft.com/de/vs/community/). 
In the installer, select at least the following options (this also automatically installs the required additional tools like CMake,...): - - Tab Workload: Desktop-development with C++ - - Tab Components (select quickly via search): C++-_CMake_ Tools for Windows, _Git_ for Windows, C++-_Clang_ Compiler for Windows, MS-Build Support for LLVM-Toolset (clang) - - Please remember to always use a Developer Command Prompt / PowerShell for VS2022 for git, build, test - - For Windows on ARM (arm64, WoA) build with: - ```bash - cmake --preset arm64-windows-llvm-release -D GGML_OPENMP=OFF - cmake --build build-arm64-windows-llvm-release - ``` - Note: Building for arm64 could also be done just with MSVC (with the build-arm64-windows-MSVC preset, or the standard CMake build instructions). But MSVC does not support inline ARM assembly-code, used e.g. for the accelerated Q4_0_4_8 CPU kernels. - -- Using `gmake` (FreeBSD): - - 1. Install and activate [DRM in FreeBSD](https://wiki.freebsd.org/Graphics) - 2. Add your user to **video** group - 3. Install compilation dependencies. - - ```bash - sudo pkg install gmake automake autoconf pkgconf llvm15 openblas - - gmake CC=/usr/local/bin/clang15 CXX=/usr/local/bin/clang++15 -j4 - ``` - -## Metal Build - -On MacOS, Metal is enabled by default. Using Metal makes the computation run on the GPU. -To disable the Metal build at compile time use the `GGML_NO_METAL=1` flag or the `GGML_METAL=OFF` cmake option. - -When built with Metal support, you can explicitly disable GPU inference with the `--n-gpu-layers|-ngl 0` command-line -argument. - -## BLAS Build - -Building the program with BLAS support may lead to some performance improvements in prompt processing using batch sizes higher than 32 (the default is 512). Support with CPU-only BLAS implementations doesn't affect the normal generation performance. We may see generation performance improvements with GPU-involved BLAS implementations, e.g. cuBLAS, hipBLAS. There are currently several different BLAS implementations available for build and use: - -### Accelerate Framework: - -This is only available on Mac PCs and it's enabled by default. You can just build using the normal instructions. - -### OpenBLAS: - -This provides BLAS acceleration using only the CPU. Make sure to have OpenBLAS installed on your machine. - -- Using `make`: - - On Linux: - ```bash - make GGML_OPENBLAS=1 - ``` - - - On Windows: - - 1. Download the latest fortran version of [w64devkit](https://github.com/skeeto/w64devkit/releases). - 2. Download the latest version of [OpenBLAS for Windows](https://github.com/xianyi/OpenBLAS/releases). - 3. Extract `w64devkit` on your pc. - 4. From the OpenBLAS zip that you just downloaded copy `libopenblas.a`, located inside the `lib` folder, inside `w64devkit\x86_64-w64-mingw32\lib`. - 5. From the same OpenBLAS zip copy the content of the `include` folder inside `w64devkit\x86_64-w64-mingw32\include`. - 6. Run `w64devkit.exe`. - 7. Use the `cd` command to reach the `llama.cpp` folder. - 8. From here you can run: - - ```bash - make GGML_OPENBLAS=1 - ``` - -- Using `CMake` on Linux: - - ```bash - cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=OpenBLAS - cmake --build build --config Release - ``` - -### BLIS - -Check [BLIS.md](./backend/BLIS.md) for more information. - -### SYCL - -SYCL is a higher-level programming model to improve programming productivity on various hardware accelerators. 
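
The backend switches above all follow the same two-step CMake pattern: configure with a backend flag set, then `cmake --build`. As a minimal sketch only (not something that ships with the tree), a small Python wrapper could drive the flag combinations named in these docs — OpenBLAS, BLIS/FLAME, and SYCL; the helper name, the `build` directory, and running it from the repo root are assumptions.

```python
#!/usr/bin/env python3
# Hypothetical wrapper, not part of the repo: drives the documented two-step
# CMake build with one of the backend flag sets named in these docs.
import subprocess

# Flag sets copied from the build instructions above; everything else is assumed.
BACKEND_FLAGS = {
    "openblas": ["-DGGML_BLAS=ON", "-DGGML_BLAS_VENDOR=OpenBLAS"],   # CPU BLAS
    "blis":     ["-DGGML_BLAS=ON", "-DGGML_BLAS_VENDOR=FLAME"],      # see docs/backend/BLIS.md
    "sycl":     ["-DGGML_SYCL=ON", "-DCMAKE_C_COMPILER=icx",         # see docs/backend/SYCL.md;
                 "-DCMAKE_CXX_COMPILER=icpx"],                       # source the oneAPI env first
}

def configure_and_build(backend: str, build_dir: str = "build") -> None:
    """Configure and build llama.cpp from the repo root with the chosen backend."""
    subprocess.run(["cmake", "-B", build_dir, *BACKEND_FLAGS[backend]], check=True)
    subprocess.run(["cmake", "--build", build_dir, "--config", "Release", "-j"], check=True)

if __name__ == "__main__":
    configure_and_build("openblas")
```
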
- -llama.cpp based on SYCL is used to **support Intel GPU** (Data Center Max series, Flex series, Arc series, Built-in GPU and iGPU). - -For detailed info, please refer to [llama.cpp for SYCL](./backend/SYCL.md). - -### Intel oneMKL - -Building through oneAPI compilers will make avx_vnni instruction set available for intel processors that do not support avx512 and avx512_vnni. Please note that this build config **does not support Intel GPU**. For Intel GPU support, please refer to [llama.cpp for SYCL](./backend/SYCL.md). - -- Using manual oneAPI installation: - By default, `GGML_BLAS_VENDOR` is set to `Generic`, so if you already sourced intel environment script and assign `-DGGML_BLAS=ON` in cmake, the mkl version of Blas will automatically been selected. Otherwise please install oneAPI and follow the below steps: - ```bash - source /opt/intel/oneapi/setvars.sh # You can skip this step if in oneapi-basekit docker image, only required for manual installation - cmake -B build -DGGML_BLAS=ON -DGGML_BLAS_VENDOR=Intel10_64lp -DCMAKE_C_COMPILER=icx -DCMAKE_CXX_COMPILER=icpx -DGGML_NATIVE=ON - cmake --build build --config Release - ``` - -- Using oneAPI docker image: - If you do not want to source the environment vars and install oneAPI manually, you can also build the code using intel docker container: [oneAPI-basekit](https://hub.docker.com/r/intel/oneapi-basekit). Then, you can use the commands given above. - -Check [Optimizing and Running LLaMA2 on Intel® CPU](https://www.intel.com/content/www/us/en/content-details/791610/optimizing-and-running-llama2-on-intel-cpu.html) for more information. - -### CUDA - -This provides GPU acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads). - -For Jetson user, if you have Jetson Orin, you can try this: [Offical Support](https://www.jetson-ai-lab.com/tutorial_text-generation.html). If you are using an old model(nano/TX2), need some additional operations before compiling. - -- Using `make`: - ```bash - make GGML_CUDA=1 - ``` -- Using `CMake`: - - ```bash - cmake -B build -DGGML_CUDA=ON - cmake --build build --config Release - ``` - -The environment variable [`CUDA_VISIBLE_DEVICES`](https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#env-vars) can be used to specify which GPU(s) will be used. The following compilation options are also available to tweak performance: - -| Option | Legal values | Default | Description | -|-------------------------------|------------------------|---------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GGML_CUDA_FORCE_DMMV | Boolean | false | Force the use of dequantization + matrix vector multiplication kernels instead of using kernels that do matrix vector multiplication on quantized data. By default the decision is made based on compute capability (MMVQ for 6.1/Pascal/GTX 1000 or higher). Does not affect k-quants. | -| GGML_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the CUDA dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. 
Power of 2 heavily recommended. Does not affect k-quants. | -| GGML_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the CUDA mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. | -| GGML_CUDA_FORCE_MMQ | Boolean | false | Force the use of custom matrix multiplication kernels for quantized models instead of FP16 cuBLAS even if there is no int8 tensor core implementation available (affects V100, RDNA3). MMQ kernels are enabled by default on GPUs with int8 tensor core support. With MMQ force enabled, speed for large batch sizes will be worse but VRAM consumption will be lower. | -| GGML_CUDA_FORCE_CUBLAS | Boolean | false | Force the use of FP16 cuBLAS instead of custom matrix multiplication kernels for quantized models | -| GGML_CUDA_F16 | Boolean | false | If enabled, use half-precision floating point arithmetic for the CUDA dequantization + mul mat vec kernels and for the q4_1 and q5_1 matrix matrix multiplication kernels. Can improve performance on relatively recent GPUs. | -| GGML_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per CUDA thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. | -| GGML_CUDA_PEER_MAX_BATCH_SIZE | Positive integer | 128 | Maximum batch size for which to enable peer access between multiple GPUs. Peer access requires either Linux or NVLink. When using NVLink enabling peer access for larger batch sizes is potentially beneficial. | -| GGML_CUDA_FA_ALL_QUANTS | Boolean | false | Compile support for all KV cache quantization type (combinations) for the FlashAttention CUDA kernels. More fine-grained control over KV cache size but compilation takes much longer. | - -### hipBLAS - -This provides BLAS acceleration on HIP-supported AMD GPUs. -Make sure to have ROCm installed. -You can download it from your Linux distro's package manager or from here: [ROCm Quick Start (Linux)](https://rocm.docs.amd.com/projects/install-on-linux/en/latest/tutorial/quick-start.html#rocm-install-quick). - -- Using `make`: - ```bash - make GGML_HIPBLAS=1 - ``` -- Using `CMake` for Linux (assuming a gfx1030-compatible AMD GPU): - ```bash - HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \ - cmake -S . -B build -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \ - && cmake --build build --config Release -- -j 16 - ``` - On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DGGML_HIP_UMA=ON`. - However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs). - - Note that if you get the following error: - ``` - clang: error: cannot find ROCm device library; provide its path via '--rocm-path' or '--rocm-device-lib-path', or pass '-nogpulib' to build without ROCm device library - ``` - Try searching for a directory under `HIP_PATH` that contains the file - `oclc_abi_version_400.bc`. Then, add the following to the start of the - command: `HIP_DEVICE_LIB_PATH=`, so something - like: - ```bash - HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -p)" \ - HIP_DEVICE_LIB_PATH= \ - cmake -S . 
-B build -DGGML_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \ - && cmake --build build -- -j 16 - ``` - -- Using `make` (example for target gfx1030, build with 16 CPU threads): - ```bash - make -j16 GGML_HIPBLAS=1 GGML_HIP_UMA=1 AMDGPU_TARGETS=gfx1030 - ``` - -- Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU): - ```bash - set PATH=%HIP_PATH%\bin;%PATH% - cmake -S . -B build -G Ninja -DAMDGPU_TARGETS=gfx1100 -DGGML_HIPBLAS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release - cmake --build build - ``` - Make sure that `AMDGPU_TARGETS` is set to the GPU arch you want to compile for. The above example uses `gfx1100` that corresponds to Radeon RX 7900XTX/XT/GRE. You can find a list of targets [here](https://llvm.org/docs/AMDGPUUsage.html#processors) - Find your gpu version string by matching the most significant version information from `rocminfo | grep gfx | head -1 | awk '{print $2}'` with the list of processors, e.g. `gfx1035` maps to `gfx1030`. - - -The environment variable [`HIP_VISIBLE_DEVICES`](https://rocm.docs.amd.com/en/latest/understand/gpu_isolation.html#hip-visible-devices) can be used to specify which GPU(s) will be used. -If your GPU is not officially supported you can use the environment variable [`HSA_OVERRIDE_GFX_VERSION`] set to a similar GPU, for example 10.3.0 on RDNA2 (e.g. gfx1030, gfx1031, or gfx1035) or 11.0.0 on RDNA3. -The following compilation options are also available to tweak performance (yes, they refer to CUDA, not HIP, because it uses the same code as the cuBLAS version above): - -| Option | Legal values | Default | Description | -|------------------------|------------------------|---------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| GGML_CUDA_DMMV_X | Positive integer >= 32 | 32 | Number of values in x direction processed by the HIP dequantization + matrix vector multiplication kernel per iteration. Increasing this value can improve performance on fast GPUs. Power of 2 heavily recommended. Does not affect k-quants. | -| GGML_CUDA_MMV_Y | Positive integer | 1 | Block size in y direction for the HIP mul mat vec kernels. Increasing this value can improve performance on fast GPUs. Power of 2 recommended. Does not affect k-quants. | -| GGML_CUDA_KQUANTS_ITER | 1 or 2 | 2 | Number of values processed per iteration and per HIP thread for Q2_K and Q6_K quantization formats. Setting this value to 1 can improve performance for slow GPUs. | - -### Vulkan - -**Windows** - -#### w64devkit - -Download and extract [w64devkit](https://github.com/skeeto/w64devkit/releases). - -Download and install the [Vulkan SDK](https://vulkan.lunarg.com/sdk/home#windows). When selecting components, only the Vulkan SDK Core is required. - -Launch `w64devkit.exe` and run the following commands to copy Vulkan dependencies: -```sh -SDK_VERSION=1.3.283.0 -cp /VulkanSDK/$SDK_VERSION/Bin/glslc.exe $W64DEVKIT_HOME/bin/ -cp /VulkanSDK/$SDK_VERSION/Lib/vulkan-1.lib $W64DEVKIT_HOME/x86_64-w64-mingw32/lib/ -cp -r /VulkanSDK/$SDK_VERSION/Include/* $W64DEVKIT_HOME/x86_64-w64-mingw32/include/ -cat > $W64DEVKIT_HOME/x86_64-w64-mingw32/lib/pkgconfig/vulkan.pc < ` - -It will then build & run in the debugger for you. 
-
-To just execute a test and get back a PASS or FAIL message run:
-
-```bash
-./scripts/debug-test.sh test-tokenizer
-```
-
-To test in GDB, use the `-g` flag to enable GDB test mode.
-
-```bash
-./scripts/debug-test.sh -g test-tokenizer
-
-# Once in the debugger, i.e. at the chevrons prompt, setting a breakpoint could be as follows:
->>> b main
-```
-
-To speed up the testing loop, if you know your test number you can run it directly, as shown below:
-
-```bash
-./scripts/debug-test.sh test 23
-```
-
-For further reference use `debug-test.sh -h` to print help.
-
-
-
-### How does the script work?
-If you want to be able to use the concepts contained in the script separately, the important ones are briefly outlined below.
-
-#### Step 1: Reset and Setup folder context
-
-From the base of this repository, let's create `build-ci-debug` as our build context.
-
-```bash
-rm -rf build-ci-debug && mkdir build-ci-debug && cd build-ci-debug
-```
-
-#### Step 2: Setup Build Environment and Compile Test Binaries
-
-Set up and trigger a build in debug mode. You may adapt the arguments as needed, but in this case these are sane defaults.
-
-```bash
-cmake -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON ..
-make -j
-```
-
-#### Step 3: Find all available tests that match the REGEX
-
-The output of this command will give you the command & arguments needed to run GDB.
-
-* `-R test-tokenizer` : looks for all the test files named `test-tokenizer*` (R=Regex)
-* `-N` : "show-only" disables test execution & shows test commands that you can feed to GDB.
-* `-V` : Verbose Mode
-
-```bash
-ctest -R "test-tokenizer" -V -N
-```
-
-This may return output similar to the example below (focusing on the key lines to pay attention to):
-
-```bash
-...
-1: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
-1: Working Directory: .
-Labels: main
-  Test #1: test-tokenizer-0-llama-spm
-...
-4: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-falcon.gguf"
-4: Working Directory: .
-Labels: main
-  Test #4: test-tokenizer-0-falcon
-...
-```
-
-#### Step 4: Identify Test Command for Debugging
-
-So for test #1 above, we can identify two pieces of relevant information:
-* Test Binary: `~/llama.cpp/build-ci-debug/bin/test-tokenizer-0`
-* Test GGUF Model: `~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf`
-
-#### Step 5: Run GDB on test command
-
-Based on the ctest 'test command' report above, we can then run a GDB session with the command below:
-
-```bash
-gdb --args ${Test Binary} ${Test GGUF Model}
-```
-
-Example:
-
-```bash
-gdb --args ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
-```
diff --git a/docs/development/llama-star/idea-arch.key b/docs/development/llama-star/idea-arch.key
deleted file mode 100755
index 3e068e707..000000000
Binary files a/docs/development/llama-star/idea-arch.key and /dev/null differ
diff --git a/docs/development/llama-star/idea-arch.pdf b/docs/development/llama-star/idea-arch.pdf
deleted file mode 100644
index 4fa92c71d..000000000
Binary files a/docs/development/llama-star/idea-arch.pdf and /dev/null differ
diff --git a/docs/development/token_generation_performance_tips.md b/docs/development/token_generation_performance_tips.md
deleted file mode 100644
index 41b7232c9..000000000
--- a/docs/development/token_generation_performance_tips.md
+++ /dev/null
@@ -1,40 +0,0 @@
-# Token generation performance troubleshooting
-
-## Verifying that the model is running on the GPU with CUDA
-Make sure you compiled llama with the correct env variables according to [this guide](/docs/build.md#cuda), so that llama accepts the `-ngl N` (or `--n-gpu-layers N`) flag. When running llama, you may configure `N` to be very large, and llama will offload the maximum possible number of layers to the GPU, even if it's less than the number you configured. For example:
-```shell
-./llama-cli -m "path/to/model.gguf" -ngl 200000 -p "Please sir, may I have some "
-```
-
-When running llama, before it starts the inference work, it will output diagnostic information that shows whether cuBLAS is offloading work to the GPU. Look for these lines:
-```shell
-llama_model_load_internal: [cublas] offloading 60 layers to GPU
-llama_model_load_internal: [cublas] offloading output layer to GPU
-llama_model_load_internal: [cublas] total VRAM used: 17223 MB
-... rest of inference
-```
-
-If you see these lines, then the GPU is being used.
-
-## Verifying that the CPU is not oversaturated
-llama accepts a `-t N` (or `--threads N`) parameter. It's extremely important that this parameter is not too large. If your token generation is extremely slow, try setting this number to 1. If this significantly improves your token generation speed, then your CPU is being oversaturated and you need to explicitly set this parameter to the number of physical CPU cores on your machine (even if you utilize a GPU). If in doubt, start with 1 and double the amount until you hit a performance bottleneck, then scale the number down.
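-
-As a rough illustration of that advice (a minimal sketch, assuming a Linux machine where `lscpu` is available and that the `llama-cli` binary sits in the current directory), you could look up the physical core count once and pass it via `-t`:
-
-```shell
-# Count unique (core, socket) pairs reported by lscpu = number of physical cores
-N_CORES=$(lscpu -p=Core,Socket | grep -v '^#' | sort -u | wc -l)
-
-# Use that count for generation threads; scale it back down if the CPU still looks oversaturated
-./llama-cli -m "path/to/model.gguf" -t "$N_CORES" -p "Please sir, may I have some "
-```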
-
-# Example of the effect of runtime flags on inference speed
-These runs were tested on the following machine:
-GPU: A6000 (48GB VRAM)
-CPU: 7 physical cores
-RAM: 32GB
-
-Model: `TheBloke_Wizard-Vicuna-30B-Uncensored-GGML/Wizard-Vicuna-30B-Uncensored.q4_0.gguf` (30B parameters, 4-bit quantization, GGML)
-
-Run command: `./llama-cli -m "path/to/model.gguf" -p "An extremely detailed description of the 10 best ethnic dishes will follow, with recipes: " -n 1000 [additional benchmark flags]`
-
-Result:
-
-| command | tokens/second (higher is better) |
-| - | - |
-| -ngl 2000000 | N/A (less than 0.1) |
-| -t 7 | 1.7 |
-| -t 1 -ngl 2000000 | 5.5 |
-| -t 7 -ngl 2000000 | 8.7 |
-| -t 4 -ngl 2000000 | 9.1 |
diff --git a/docs/docker.md b/docs/docker.md
deleted file mode 100644
index d8922d77d..000000000
--- a/docs/docker.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# Docker
-
-## Prerequisites
-* Docker must be installed and running on your system.
-* Create a folder to store big models & intermediate files (e.g. /llama/models)
-
-## Images
-We have three Docker images available for this project:
-
-1. `ghcr.io/ggerganov/llama.cpp:full`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and quantize them to 4 bits. (platforms: `linux/amd64`, `linux/arm64`)
-2. `ghcr.io/ggerganov/llama.cpp:light`: This image only includes the main executable file. (platforms: `linux/amd64`, `linux/arm64`)
-3. `ghcr.io/ggerganov/llama.cpp:server`: This image only includes the server executable file. (platforms: `linux/amd64`, `linux/arm64`)
-
-Additionally, there are the following images, similar to the above:
-
-- `ghcr.io/ggerganov/llama.cpp:full-cuda`: Same as `full` but compiled with CUDA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:light-cuda`: Same as `light` but compiled with CUDA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:server-cuda`: Same as `server` but compiled with CUDA support. (platforms: `linux/amd64`)
-- `ghcr.io/ggerganov/llama.cpp:full-rocm`: Same as `full` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
-- `ghcr.io/ggerganov/llama.cpp:light-rocm`: Same as `light` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
-- `ghcr.io/ggerganov/llama.cpp:server-rocm`: Same as `server` but compiled with ROCm support. (platforms: `linux/amd64`, `linux/arm64`)
-
-The GPU-enabled images are not currently tested by CI beyond being built. They are not built with any variation from the ones in the Dockerfiles defined in [.devops/](.devops/) and the GitHub Action defined in [.github/workflows/docker.yml](.github/workflows/docker.yml). If you need different settings (for example, a different CUDA or ROCm library), you'll need to build the images locally for now.
-
-## Usage
-
-The easiest way to download the models, convert them to ggml and optimize them is with the `--all-in-one` command, which is available in the `full` Docker image.
-
-Replace `/path/to/models` below with the actual path where you downloaded the models.
-
-```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --all-in-one "/models/" 7B
-```
-
-On completion, you are ready to play!
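-
-Before moving on to the run commands below, it can be worth a quick sanity check that the converted and quantized files actually ended up in the mounted folder on the host (a minimal sketch; the `7B/ggml-model-q4_0.gguf` name simply mirrors the file used in the examples that follow):
-
-```bash
-# List the converted model files on the host side of the bind mount
-ls -lh /path/to/models/7B/
-```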
-
-```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:full --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
-```
-
-or with a light image:
-
-```bash
-docker run -v /path/to/models:/models ghcr.io/ggerganov/llama.cpp:light -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512
-```
-
-or with a server image:
-
-```bash
-docker run -v /path/to/models:/models -p 8000:8000 ghcr.io/ggerganov/llama.cpp:server -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512
-```
-
-## Docker With CUDA
-
-Assuming one has the [nvidia-container-toolkit](https://github.com/NVIDIA/nvidia-container-toolkit) properly installed on Linux, or is using a GPU-enabled cloud, `cuBLAS` should be accessible inside the container.
-
-## Building Docker locally
-
-```bash
-docker build -t local/llama.cpp:full-cuda -f .devops/full-cuda.Dockerfile .
-docker build -t local/llama.cpp:light-cuda -f .devops/llama-cli-cuda.Dockerfile .
-docker build -t local/llama.cpp:server-cuda -f .devops/llama-server-cuda.Dockerfile .
-```
-
-You may want to pass in some different `ARGS`, depending on the CUDA environment supported by your container host, as well as the GPU architecture.
-
-The defaults are:
-
-- `CUDA_VERSION` set to `11.7.1`
-- `CUDA_DOCKER_ARCH` set to `all`
-
-The resulting images are essentially the same as the non-CUDA images:
-
-1. `local/llama.cpp:full-cuda`: This image includes both the main executable file and the tools to convert LLaMA models into ggml and quantize them to 4 bits.
-2. `local/llama.cpp:light-cuda`: This image only includes the main executable file.
-3. `local/llama.cpp:server-cuda`: This image only includes the server executable file.
-
-## Usage
-
-After building locally, usage is similar to the non-CUDA examples, but you'll need to add the `--gpus` flag. You will also want to use the `--n-gpu-layers` flag.
-
-```bash
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:full-cuda --run -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:light-cuda -m /models/7B/ggml-model-q4_0.gguf -p "Building a website can be done in 10 simple steps:" -n 512 --n-gpu-layers 1
-docker run --gpus all -v /path/to/models:/models local/llama.cpp:server-cuda -m /models/7B/ggml-model-q4_0.gguf --port 8000 --host 0.0.0.0 -n 512 --n-gpu-layers 1
-```
diff --git a/docs/install.md b/docs/install.md
deleted file mode 100644
index 10a568506..000000000
--- a/docs/install.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# Install pre-built version of llama.cpp
-
-## Homebrew
-
-On Mac and Linux, the Homebrew package manager can be used via
-
-```sh
-brew install llama.cpp
-```
-The formula is automatically updated with new `llama.cpp` releases. More info: https://github.com/ggerganov/llama.cpp/discussions/7668
-
-## Nix
-
-On Mac and Linux, the Nix package manager can be used via
-
-```sh
-nix profile install nixpkgs#llama-cpp
-```
-for flake-enabled installs.
-
-Or
-
-```sh
-nix-env --file '<nixpkgs>' --install --attr llama-cpp
-```
-
-for non-flake-enabled installs.
-
-This expression is automatically updated within the [nixpkgs repo](https://github.com/NixOS/nixpkgs/blob/nixos-24.05/pkgs/by-name/ll/llama-cpp/package.nix#L164).
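-
-Whichever package manager you use, a quick way to confirm the installation is to check that the binary is on your `PATH` and prints its usage text (a minimal sketch; it assumes the package installs the binary under the name `llama-cli`, which may differ in older packaged releases):
-
-```sh
-which llama-cli
-llama-cli --help | head -n 5
-```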
- -## Flox - -On Mac and Linux, Flox can be used to install llama.cpp within a Flox environment via - -```sh -flox install llama-cpp -``` - -Flox follows the nixpkgs build of llama.cpp. diff --git a/examples/Miku.sh b/examples/Miku.sh deleted file mode 100755 index 0f6c8c878..000000000 --- a/examples/Miku.sh +++ /dev/null @@ -1,50 +0,0 @@ -#!/bin/bash -set -e - -AI_NAME="${AI_NAME:-Miku}" -MODEL="${MODEL:-./models/llama-2-7b-chat.ggmlv3.q4_K_M.bin}" -USER_NAME="${USER_NAME:-Anon}" - -# Uncomment and adjust to the number of CPU cores you want to use. -#N_THREAD="${N_THREAD:-4}" -CTX_SIZE="${CTX_SIZE:-4096}" -N_PREDICTS="${N_PREDICTS:-4096}" - -GEN_OPTIONS=(--batch_size 1024 ---ctx_size "$CTX_SIZE" ---keep -1 ---repeat_last_n 256 ---repeat_penalty 1.17647 ---temp 0.6 ---mirostat 2) - -if [ -n "$N_THREAD" ]; then - GEN_OPTIONS+=(--threads "$N_THREAD") -fi - -./llama-cli "${GEN_OPTIONS[@]}" \ - --model "$MODEL" \ - --in-prefix " " \ - --in-suffix "${AI_NAME}:" \ - --n_predict "$N_PREDICTS" \ - --color --interactive \ - --reverse-prompt "${USER_NAME}:" \ - --prompt "This is a transcript of a 1000 page, never ending conversation between ${USER_NAME} and the cute and helpful AI assistant ${AI_NAME}. ${AI_NAME} is a girl who is an AI running on the user's computer. -${AI_NAME} can think for herself without the user seeing her thoughts by adding a /think prefix to her output. She uses this to reason about the world and to think about what she should say next. -${AI_NAME} is always coherent and makes sense, but if she isn't sure if what she is saying is correct, she will ask the user for help. -${AI_NAME} is a very helpful AI and will help the user with anything they need. She is also very friendly and will try to make the user feel better if they are sad. -${AI_NAME} is also very curious and will ask the user a lot of questions about themselves and their life. She will also try to make the user like her. -The conversation is only between ${USER_NAME} and ${AI_NAME}. -The conversation is only through text, so ${AI_NAME} can't see ${USER_NAME}'s face or hear his voice. -${AI_NAME} can only communicate through text, so she can't send images or videos. - - -${USER_NAME}: Hello! -${AI_NAME}: /think I wonder what I should say to ${USER_NAME}? This is the first time we talk, so it's important that I make a good first impression! -${AI_NAME}: Hi! I am ${AI_NAME}, your new AI friend, assistant (or whatever you like!), it's so nice to meet you! ^_^ -${AI_NAME}: I might be an AI, but I also have feelings, so please be nice to me! :) -${USER_NAME}: I'll make sure to be nice to you! I'm so happy to have you as my assistant! -${AI_NAME}: /think It sounds like ${USER_NAME} is happy to have me as their assistant! I'm so happy too! ^_^ Glad that whole emotion thing didn't scare him off! -${AI_NAME}: /think I wonder what ${USER_NAME} likes to do in his free time? I should ask him about that! -${AI_NAME}: What do you like to do in your free time? ^_^ -${USER_NAME}:" "$@" diff --git a/examples/base-translate.sh b/examples/base-translate.sh deleted file mode 100755 index 103a52f55..000000000 --- a/examples/base-translate.sh +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash -# -# Few-shot translation example. -# Requires a base model (i.e. no fine-tuned or instruct models). 
-# -# Usage: -# -# cd llama.cpp -# make -j -# -# ./examples/base-translate.sh "" [extra-main-args] -# - -if [ $# -lt 2 ]; then - echo "Usage: ./base-translate.sh \"\" [extra-main-args]" - exit 1 -fi - -eargs="" -if [ $# -gt 2 ]; then - eargs="${@:3}" -fi - -ftmp="__llama.cpp_example_tmp__.txt" -trap "rm -f $ftmp" EXIT - -echo "Translate from English to French: - -=== - -sea otter, peppermint, plush girafe: - -sea otter => loutre de mer -peppermint => menthe poivrée -plush girafe => girafe peluche - -=== - -violin - -violin => violon - -=== - -phone, computer, mouse, keyboard: - -phone => téléphone -computer => ordinateur -mouse => souris -keyboard => clavier - -=== -" > $ftmp - -echo "$2 -" >> $ftmp - -model=$1 - -# generate the most likely continuation until the string "===" is found -./llama-cli -m $model -f $ftmp -n 64 --temp 0 --repeat-penalty 1.0 --no-penalize-nl -r "===" $eargs diff --git a/examples/chat-13B.bat b/examples/chat-13B.bat deleted file mode 100644 index c5c8ac6ef..000000000 --- a/examples/chat-13B.bat +++ /dev/null @@ -1,57 +0,0 @@ -@setlocal disabledelayedexpansion enableextensions -@echo off - -cd /d "%~dp0.." -if not "%errorlevel%"=="0" ( - echo Unable to change directory. - pause - exit /b 1 -) - -if not defined MODEL set "MODEL=models\13B\ggml-model-q4_0.bin" -if not defined USER_NAME set "USER_NAME=User" -if not defined AI_NAME set "AI_NAME=ChatLLaMa" -rem Adjust to the number of CPU cores you want to use. -rem if not defined N_THREAD set "N_THREAD=8" -rem Number of tokens to predict (made it larger than default because we want a long interaction) -if not defined N_PREDICTS set "N_PREDICTS=2048" -if not defined GEN_OPTIONS set "GEN_OPTIONS=--ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647" - -rem Default main script paths -set "DEFAULT_MAIN_SCRIPT_PATHS=main.exe build\bin\main.exe" - -rem Get main script path from command line arguments -set "MAIN_SCRIPT_PATH=%~1" - -rem If the main script path was not specified, try the default paths -if not defined MAIN_SCRIPT_PATH ( - for %%i in (%DEFAULT_MAIN_SCRIPT_PATHS%) do ( - if exist "%%i" set "MAIN_SCRIPT_PATH=%%i" - ) -) - -rem If the main script path was not found, tell the user how to specify it -if not defined MAIN_SCRIPT_PATH ( - echo The main script could not be found. Please provide the path to the main script as 1st argument to this script, or place the main script in one of the default locations: - echo %DEFAULT_MAIN_SCRIPT_PATHS% - pause - exit /b 1 -) - -rem Default context, feel free to edit it -set "PROMPT_TEXT=Text transcript of a never ending dialog, where %USER_NAME% interacts with an AI assistant named %AI_NAME%. %AI_NAME% is helpful, kind, honest, friendly, good at writing and never fails to answer %USER_NAME%'s requests immediately and with details and precision. There are no annotations like (30 seconds passed...) or (to himself), just what %USER_NAME% and %AI_NAME% say aloud to each other. The dialog lasts for years, the entirety of it is shared below. It's 10000 pages long. The transcript only includes text, it does not include markup like HTML and Markdown." 
- -rem Set a temporary variable if N_THREAD is set -if defined N_THREAD ( - set "_N_THREAD=--threads %N_THREAD%" -) else ( - set "_N_THREAD=" -) - -rem Run the script -echo "%MAIN_SCRIPT_PATH%" %GEN_OPTIONS% %_N_THREAD% ^ - --model "%MODEL%" ^ - --n_predict %N_PREDICTS% ^ - --color --interactive ^ - --reverse-prompt "%USER_NAME%:" ^ - --prompt "%PROMPT_TEXT%" diff --git a/examples/chat-13B.sh b/examples/chat-13B.sh deleted file mode 100755 index 1828903c3..000000000 --- a/examples/chat-13B.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -set -e - -cd "$(dirname "$0")/.." || exit - -MODEL="${MODEL:-./models/13B/ggml-model-q4_0.bin}" -PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat.txt} -USER_NAME="${USER_NAME:-USER}" -AI_NAME="${AI_NAME:-ChatLLaMa}" - -# Adjust to the number of CPU cores you want to use. -N_THREAD="${N_THREAD:-8}" -# Number of tokens to predict (made it larger than default because we want a long interaction) -N_PREDICTS="${N_PREDICTS:-2048}" - -# Note: you can also override the generation options by specifying them on the command line: -# For example, override the context size by doing: ./chatLLaMa --ctx_size 1024 -GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}" - -DATE_TIME=$(date +%H:%M) -DATE_YEAR=$(date +%Y) - -PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt) - -sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \ - -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \ - -e "s/\[\[DATE_TIME\]\]/$DATE_TIME/g" \ - -e "s/\[\[DATE_YEAR\]\]/$DATE_YEAR/g" \ - $PROMPT_TEMPLATE > $PROMPT_FILE - -# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS -./llama-cli $GEN_OPTIONS \ - --model "$MODEL" \ - --threads "$N_THREAD" \ - --n_predict "$N_PREDICTS" \ - --color --interactive \ - --file ${PROMPT_FILE} \ - --reverse-prompt "${USER_NAME}:" \ - --in-prefix ' ' \ - "$@" diff --git a/examples/chat-persistent.sh b/examples/chat-persistent.sh deleted file mode 100755 index d9cab9836..000000000 --- a/examples/chat-persistent.sh +++ /dev/null @@ -1,151 +0,0 @@ -#!/bin/bash - -set -euo pipefail - -cd "$(dirname "$0")/.." || exit - -if [[ -z "${PROMPT_CACHE_FILE+x}" || -z "${CHAT_SAVE_DIR+x}" ]]; then - echo >&2 "error: PROMPT_CACHE_FILE and CHAT_SAVE_DIR must be provided" - exit 1 -fi - -MODEL="${MODEL:-./models/llama-13b/ggml-model-q4_0.gguf}" -PROMPT_TEMPLATE="${PROMPT_TEMPLATE:-./prompts/chat.txt}" -USER_NAME="${USER_NAME:-User}" -AI_NAME="${AI_NAME:-ChatLLaMa}" -DATE_TIME="$(date +%H:%M)" -DATE_YEAR="$(date +%Y)" - -LOG="${CHAT_SAVE_DIR}/main.log" -LOG_BG="${CHAT_SAVE_DIR}/main-bg.log" -CUR_PROMPT_FILE="${CHAT_SAVE_DIR}/current-prompt.txt" -CUR_PROMPT_CACHE="${CHAT_SAVE_DIR}/current-cache.bin" -NEXT_PROMPT_FILE="${CHAT_SAVE_DIR}/next-prompt.txt" -NEXT_PROMPT_CACHE="${CHAT_SAVE_DIR}/next-cache.bin" - -SESSION_SIZE_MSG_PATTERN='main: session file matches [[:digit:]]+ / [[:digit:]]+' -SAMPLE_TIME_MSG_PATTERN='sample time =[[:space:]]+[[:digit:]]+.[[:digit:]]+ ms /[[:space:]]+[[:digit:]]+' -SED_DELETE_MESSAGES="/^(${USER_NAME}:|${AI_NAME}:|\\.\\.\\.)/,\$d" - -CTX_SIZE=2048 -CTX_ROTATE_POINT=$((CTX_SIZE * 3 / 5)) # REVIEW -OPTS=(--model "$MODEL" --ctx_size "$CTX_SIZE" --repeat_last_n 256 "$@") - -# An unbuffered `tail -c+N` -skip_bytes() { - LANG=C IFS= read -r -n "$1" -d '' c - while LANG=C IFS= read -r -n 1 -d '' c; do - printf '%s' "$c" - done -} - -mkdir -p "$CHAT_SAVE_DIR" -echo >"$LOG" -trap "tail -n100 ${LOG}" EXIT - -if [[ ! 
-e "$CUR_PROMPT_FILE" ]]; then - sed -e "s/\[\[USER_NAME\]\]/${USER_NAME}/g" \ - -e "s/\[\[AI_NAME\]\]/${AI_NAME}/g" \ - -e "s/\[\[DATE_TIME\]\]/${DATE_TIME}/g" \ - -e "s/\[\[DATE_YEAR\]\]/${DATE_YEAR}/g" \ - "$PROMPT_TEMPLATE" >"$CUR_PROMPT_FILE" -fi - -if [[ ! -e "$NEXT_PROMPT_FILE" ]]; then - sed -r "$SED_DELETE_MESSAGES" "$CUR_PROMPT_FILE" >"$NEXT_PROMPT_FILE" -fi - -if [[ "$(tail -c4 "$NEXT_PROMPT_FILE")" != "..." ]]; then - echo '...' >>"$NEXT_PROMPT_FILE" -fi - -if [[ ! -e "$PROMPT_CACHE_FILE" ]]; then - echo 'Prompt cache does not exist, building...' - # Default batch_size to 64 here for better user feedback during initial prompt processing - ./llama-cli 2>>"$LOG" \ - --batch_size 64 \ - "${OPTS[@]}" \ - --prompt-cache "$PROMPT_CACHE_FILE" \ - --file "$CUR_PROMPT_FILE" \ - --n_predict 1 - echo - echo 'Done!' -fi - -if [[ ! -e "$CUR_PROMPT_CACHE" ]]; then - cp "$PROMPT_CACHE_FILE" "$CUR_PROMPT_CACHE" -fi -if [[ ! -e "$NEXT_PROMPT_CACHE" ]]; then - cp "$PROMPT_CACHE_FILE" "$NEXT_PROMPT_CACHE" -fi - -printf '%s ' "$(< "$CUR_PROMPT_FILE")" -n_tokens=0 - -while read -e line; do - # Limit generation to remaining context, with a buffer and estimating 2 chars/token for input - n_predict=$((CTX_SIZE - n_tokens - ${#line} / 2 - 32)) - - # Swap prompts when we're about to run out of context - if ((n_predict <= 0)); then - wait # for background main (below) to finish with next prompt - mv "$NEXT_PROMPT_FILE" "$CUR_PROMPT_FILE" - mv "$NEXT_PROMPT_CACHE" "$CUR_PROMPT_CACHE" - - sed -r "$SED_DELETE_MESSAGES" "$CUR_PROMPT_FILE" >"$NEXT_PROMPT_FILE" - echo '...' >>"$NEXT_PROMPT_FILE" - cp "$PROMPT_CACHE_FILE" "$NEXT_PROMPT_CACHE" - - n_tokens=0 - n_predict=$((CTX_SIZE / 2)) - fi - - echo " ${line}" >>"$CUR_PROMPT_FILE" - if ((n_tokens > CTX_ROTATE_POINT)); then - echo " ${line}" >>"$NEXT_PROMPT_FILE" - fi - - n_prompt_len_pre=$(($(wc -c <"$CUR_PROMPT_FILE"))) - - printf '%s: ' "$AI_NAME" >>"$CUR_PROMPT_FILE" - - ./llama-cli 2>>"$LOG" "${OPTS[@]}" \ - --prompt-cache "$CUR_PROMPT_CACHE" \ - --prompt-cache-all \ - --file "$CUR_PROMPT_FILE" \ - --reverse-prompt "${USER_NAME}:" \ - --n_predict "$n_predict" | - skip_bytes 1 | # skip BOS token added by ./llama-cli - tee "$CUR_PROMPT_FILE.tmp" | # save prompt + generation to tmp file - skip_bytes "$n_prompt_len_pre" # print generation - - mv "$CUR_PROMPT_FILE.tmp" "$CUR_PROMPT_FILE" - - # if we hit n_predict instead of reverse-prompt, we need to add the prompt - if [[ "$(tail -n1 "$CUR_PROMPT_FILE")" != "${USER_NAME}:" ]]; then - printf '\n%s:' "$USER_NAME" - printf '\n%s:' "$USER_NAME" >> "$CUR_PROMPT_FILE" - fi - - printf ' ' - - # HACK get num tokens from debug message - # TODO get both messages in one go - if ! session_size_msg="$(tail -n30 "$LOG" | grep -oE "$SESSION_SIZE_MSG_PATTERN")" || - ! sample_time_msg="$(tail -n10 "$LOG" | grep -oE "$SAMPLE_TIME_MSG_PATTERN")"; then - echo >&2 "Couldn't get number of tokens from ./llama-cli output!" 
- exit 1 - fi - - n_tokens=$(($(cut -d/ -f2 <<<"$session_size_msg") + $(cut -d/ -f2 <<<"$sample_time_msg"))) - - if ((n_tokens > CTX_ROTATE_POINT)); then - tail -c+$((n_prompt_len_pre + 1)) "$CUR_PROMPT_FILE" >>"$NEXT_PROMPT_FILE" - fi - - # Update cache for next prompt in background, ideally during user input - ./llama-cli >>"$LOG_BG" 2>&1 "${OPTS[@]}" \ - --prompt-cache "$NEXT_PROMPT_CACHE" \ - --file "$NEXT_PROMPT_FILE" \ - --n_predict 1 & -done diff --git a/examples/chat-vicuna.sh b/examples/chat-vicuna.sh deleted file mode 100755 index ffdd20084..000000000 --- a/examples/chat-vicuna.sh +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/bash - -set -e - -cd "$(dirname "$0")/.." || exit - -MODEL="${MODEL:-./models/ggml-vic13b-uncensored-q5_0.bin}" -PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat.txt} -USER_NAME="### Human" -AI_NAME="### Assistant" - -# Adjust to the number of CPU cores you want to use. -N_THREAD="${N_THREAD:-8}" -# Number of tokens to predict (made it larger than default because we want a long interaction) -N_PREDICTS="${N_PREDICTS:-2048}" - -# Note: you can also override the generation options by specifying them on the command line: -# For example, override the context size by doing: ./chatLLaMa --ctx_size 1024 -GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 2048 --temp 0.7 --top_k 40 --top_p 0.5 --repeat_last_n 256 --batch_size 1024 --repeat_penalty 1.17647}" - -DATE_TIME=$(date +%H:%M) -DATE_YEAR=$(date +%Y) - -PROMPT_FILE=$(mktemp -t llamacpp_prompt.XXXXXXX.txt) - -sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \ - -e "s/\[\[AI_NAME\]\]/$AI_NAME/g" \ - -e "s/\[\[DATE_TIME\]\]/$DATE_TIME/g" \ - -e "s/\[\[DATE_YEAR\]\]/$DATE_YEAR/g" \ - $PROMPT_TEMPLATE > $PROMPT_FILE - -# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS -./bin/llama-cli $GEN_OPTIONS \ - --model "$MODEL" \ - --threads "$N_THREAD" \ - --n_predict "$N_PREDICTS" \ - --color --interactive \ - --file ${PROMPT_FILE} \ - --reverse-prompt "### Human:" \ - --in-prefix ' ' \ - "$@" diff --git a/examples/chat.sh b/examples/chat.sh deleted file mode 100755 index 9f85d1e26..000000000 --- a/examples/chat.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# -# Temporary script - will be removed in the future -# - -cd `dirname $0` -cd .. 
- -# Important: -# -# "--keep 48" is based on the contents of prompts/chat-with-bob.txt -# -./llama-cli -m ./models/llama-7b/ggml-model-q4_0.gguf -c 512 -b 1024 -n 256 --keep 48 \ - --repeat_penalty 1.0 --color -i \ - -r "User:" -f prompts/chat-with-bob.txt diff --git a/examples/convert_legacy_llama.py b/examples/convert_legacy_llama.py deleted file mode 100755 index 9ab9ab06e..000000000 --- a/examples/convert_legacy_llama.py +++ /dev/null @@ -1,1440 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import annotations - -import logging -import argparse -import concurrent.futures -import enum -import faulthandler -import functools -import itertools -import json -import math -import mmap -import os -import pickle -import re -import signal -import struct -import sys -import textwrap -import time -import zipfile -from abc import ABC, abstractmethod -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor -from dataclasses import dataclass -from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable, IO, Iterable, Literal, TypeVar - -import numpy as np - -if 'NO_LOCAL_GGUF' not in os.environ: - # use .parent.parent since we are in "examples" directory - sys.path.insert(1, str(Path(__file__).parent.parent / 'gguf-py')) - -import gguf -from gguf import BaseVocab, Vocab, NoVocab, BpeVocab, SentencePieceVocab, LlamaHfVocab - -if TYPE_CHECKING: - from typing_extensions import Self, TypeAlias - -logger = logging.getLogger("convert") - -if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'): - faulthandler.register(signal.SIGUSR1) - -NDArray: TypeAlias = 'np.ndarray[Any, Any]' - -ARCH = gguf.MODEL_ARCH.LLAMA - -DEFAULT_CONCURRENCY = 8 - -ADDED_TOKENS_FILE = 'added_tokens.json' -FAST_TOKENIZER_FILE = 'tokenizer.json' - -# -# data types -# - - -@dataclass(frozen=True) -class DataType: - name: str - dtype: np.dtype[Any] - valid_conversions: list[str] - - def elements_to_bytes(self, n_elements: int) -> int: - return n_elements * self.dtype.itemsize - - -@dataclass(frozen=True) -class UnquantizedDataType(DataType): - pass - - -DT_F16 = UnquantizedDataType('F16', dtype = np.dtype(np.float16), valid_conversions = ['F32', 'Q8_0']) -DT_F32 = UnquantizedDataType('F32', dtype = np.dtype(np.float32), valid_conversions = ['F16', 'Q8_0']) -DT_I32 = UnquantizedDataType('I32', dtype = np.dtype(np.int16), valid_conversions = []) -DT_BF16 = UnquantizedDataType('BF16', dtype = np.dtype(np.uint16), valid_conversions = ['F32', 'F16', 'Q8_0']) - - -@dataclass(frozen=True) -class QuantizedDataType(DataType): - block_size: int - quantized_dtype: np.dtype[Any] - ggml_type: gguf.GGMLQuantizationType - - def quantize(self, arr: NDArray) -> NDArray: - raise NotImplementedError(f'Quantization for {self.name} not implemented') - - def elements_to_bytes(self, n_elements: int) -> int: - assert n_elements % self.block_size == 0, f'Invalid number of elements {n_elements} for {self.name} with block size {self.block_size}' - return self.quantized_dtype.itemsize * (n_elements // self.block_size) - - -@dataclass(frozen=True) -class Q8_0QuantizedDataType(QuantizedDataType): - # Mini Q8_0 quantization in Python! 
- def quantize(self, arr: NDArray) -> NDArray: - assert arr.size % self.block_size == 0 and arr.size != 0, f'Bad array size {arr.size}' - assert arr.dtype == np.float32, f'Bad array type {arr.dtype}' - n_blocks = arr.size // self.block_size - blocks = arr.reshape((n_blocks, self.block_size)) - # Much faster implementation of block quantization contributed by @Cebtenzzre - - def quantize_blocks_q8_0(blocks: NDArray) -> Iterable[tuple[Any, Any]]: - d = abs(blocks).max(axis = 1) / np.float32(127) - with np.errstate(divide = 'ignore'): - qs = (blocks / d[:, None]).round() - qs[d == 0] = 0 - yield from zip(d, qs) - return np.fromiter(quantize_blocks_q8_0(blocks), count = n_blocks, dtype = self.quantized_dtype) - - -DT_Q8_0 = Q8_0QuantizedDataType('Q8_0', - dtype = np.dtype(np.float32), valid_conversions = [], - ggml_type = gguf.GGMLQuantizationType.Q8_0, block_size = 32, - quantized_dtype = np.dtype([('d', ' DataType: - dt = GGML_FILE_TYPE_TO_DATA_TYPE.get(self) - if dt is None: - raise ValueError(self) - # Convert all 1D tensors to F32. Most of the codebase that takes in 1D tensors only handles F32 tensors, and most of the outputs tensors are F32. - # Also The 1d tensors aren't much of a performance/size issue. So instead of having to have separate F32 and F16 implementations of both, just convert everything to F32 for now. - return dt if len(tensor.shape) > 1 else DT_F32 - - -GGML_FILE_TYPE_TO_DATA_TYPE: dict[GGMLFileType, DataType] = { - GGMLFileType.AllF32 : DT_F32, - GGMLFileType.MostlyF16 : DT_F16, - GGMLFileType.MostlyQ8_0: DT_Q8_0, -} - -# -# hparams loading -# - - -@dataclass -class Params: - n_vocab: int - n_embd: int - n_layer: int - n_ctx: int - n_ff: int - n_head: int - n_head_kv: int - n_experts: int | None = None - n_experts_used: int | None = None - f_norm_eps: float | None = None - - rope_scaling_type: gguf.RopeScalingType | None = None - f_rope_freq_base: float | None = None - f_rope_scale: float | None = None - n_ctx_orig: int | None = None - rope_finetuned: bool | None = None - - ftype: GGMLFileType | None = None - - # path to the directory containing the model files - path_model: Path | None = None - - @staticmethod - def guessed(model: LazyModel) -> Params: - # try transformer naming first - n_vocab, n_embd = model["model.embed_tokens.weight"].shape if "model.embed_tokens.weight" in model else model["tok_embeddings.weight"].shape - - # try transformer naming first - if "model.layers.0.self_attn.q_proj.weight" in model: - n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model) - elif "model.layers.0.self_attn.W_pack.weight" in model: # next: try baichuan naming - n_layer = next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model) - else: - n_layer = next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model) - - if n_layer < 1: - msg = """\ - failed to guess 'n_layer'. This model is unknown or unsupported. 
- Suggestion: provide 'config.json' of the model in the same directory containing model files.""" - raise KeyError(textwrap.dedent(msg)) - - n_head = n_embd // 128 # guessed - n_mult = 256 # guessed - - # TODO: verify this - n_ff = int(2 * (4 * n_embd) / 3) - n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult) - - return Params( - n_vocab = n_vocab, - n_embd = n_embd, - n_layer = n_layer, - n_ctx = -1, - n_ff = n_ff, - n_head = n_head, - n_head_kv = n_head, - f_norm_eps = 1e-5, - ) - - @staticmethod - def loadHFTransformerJson(model: LazyModel, config_path: Path) -> Params: - with open(config_path) as f: - config = json.load(f) - - rope_scaling_type = f_rope_scale = n_ctx_orig = rope_finetuned = None - rope_scaling = config.get("rope_scaling") - - if rope_scaling is not None and (typ := rope_scaling.get("type")): - rope_factor = rope_scaling.get("factor") - f_rope_scale = rope_factor - if typ == "linear": - rope_scaling_type = gguf.RopeScalingType.LINEAR - elif typ == "yarn": - rope_scaling_type = gguf.RopeScalingType.YARN - n_ctx_orig = rope_scaling['original_max_position_embeddings'] - rope_finetuned = rope_scaling['finetuned'] - else: - raise NotImplementedError(f'Unknown rope scaling type: {typ}') - - if "max_sequence_length" in config: - n_ctx = config["max_sequence_length"] - elif "max_position_embeddings" in config: - n_ctx = config["max_position_embeddings"] - else: - msg = """\ - failed to guess 'n_ctx'. This model is unknown or unsupported. - Suggestion: provide 'config.json' of the model in the same directory containing model files.""" - raise KeyError(textwrap.dedent(msg)) - - n_experts = None - n_experts_used = None - - if "num_local_experts" in config: - n_experts = config["num_local_experts"] - n_experts_used = config["num_experts_per_tok"] - - return Params( - n_vocab = config["vocab_size"], - n_embd = config["hidden_size"], - n_layer = config["num_hidden_layers"], - n_ctx = n_ctx, - n_ff = config["intermediate_size"], - n_head = (n_head := config["num_attention_heads"]), - n_head_kv = config.get("num_key_value_heads", n_head), - n_experts = n_experts, - n_experts_used = n_experts_used, - f_norm_eps = config["rms_norm_eps"], - f_rope_freq_base = config.get("rope_theta"), - rope_scaling_type = rope_scaling_type, - f_rope_scale = f_rope_scale, - n_ctx_orig = n_ctx_orig, - rope_finetuned = rope_finetuned, - ) - - # LLaMA v2 70B params.json - # {"dim": 8192, "multiple_of": 4096, "ffn_dim_multiplier": 1.3, "n_heads": 64, "n_kv_heads": 8, "n_layers": 80, "norm_eps": 1e-05, "vocab_size": -1} - @staticmethod - def loadOriginalParamsJson(model: LazyModel, config_path: Path) -> Params: - with open(config_path) as f: - config = json.load(f) - - n_experts = None - n_experts_used = None - f_rope_freq_base = None - n_ff = None - - # hack to determine LLaMA v1 vs v2 vs CodeLlama - if config.get("moe"): - # Mixtral - n_ctx = 32768 - elif config.get("rope_theta") == 1000000: - # CodeLlama - n_ctx = 16384 - elif config["norm_eps"] == 1e-05: - # LLaMA v2 - n_ctx = 4096 - else: - # LLaMA v1 - n_ctx = 2048 - - if "layers.0.feed_forward.w1.weight" in model: - n_ff = model["layers.0.feed_forward.w1.weight"].shape[0] - - if config.get("moe"): - n_ff = model["layers.0.feed_forward.experts.0.w1.weight"].shape[0] - n_experts = config["moe"]["num_experts"] - n_experts_used = config["moe"]["num_experts_per_tok"] - f_rope_freq_base = 1e6 - - assert n_ff is not None - - return Params( - n_vocab = model["tok_embeddings.weight"].shape[0], - n_embd = config["dim"], - n_layer = config["n_layers"], - n_ctx = 
n_ctx, - n_ff = n_ff, - n_head = (n_head := config["n_heads"]), - n_head_kv = config.get("n_kv_heads", n_head), - n_experts = n_experts, - n_experts_used = n_experts_used, - f_norm_eps = config["norm_eps"], - f_rope_freq_base = config.get("rope_theta", f_rope_freq_base), - ) - - @staticmethod - def load(model_plus: ModelPlus) -> Params: - hf_config_path = model_plus.paths[0].parent / "config.json" - orig_config_path = model_plus.paths[0].parent / "params.json" - - if hf_config_path.exists(): - params = Params.loadHFTransformerJson(model_plus.model, hf_config_path) - elif orig_config_path.exists(): - params = Params.loadOriginalParamsJson(model_plus.model, orig_config_path) - elif model_plus.format != 'none': - params = Params.guessed(model_plus.model) - else: - raise ValueError('Cannot guess params when model format is none') - - params.path_model = model_plus.paths[0].parent - - return params - - -# -# data loading -# TODO: reuse (probably move to gguf.py?) -# - - -def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray: - if n_head_kv is not None and n_head != n_head_kv: - n_head = n_head_kv - return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:]) - .swapaxes(1, 2) - .reshape(weights.shape)) - - -class Tensor(ABC): - ndarray: NDArray - data_type: DataType - - @abstractmethod - def astype(self, data_type: DataType) -> Self: ... - @abstractmethod - def permute(self, n_head: int, n_head_kv: int) -> Self: ... - @abstractmethod - def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> Self: ... - @abstractmethod - def part(self, n_part: int) -> Self: ... - @abstractmethod - def to_ggml(self) -> GGMLCompatibleTensor: ... - - -def bf16_to_fp32(bf16_arr: np.ndarray[Any, np.dtype[np.uint16]]) -> NDArray: - assert bf16_arr.dtype == np.uint16, f"Input array should be of dtype uint16, but got {bf16_arr.dtype}" - fp32_arr = bf16_arr.astype(np.uint32) << 16 - return fp32_arr.view(np.float32) - - -class UnquantizedTensor(Tensor): - def __init__(self, ndarray: NDArray): - assert isinstance(ndarray, np.ndarray) - self.ndarray = ndarray - self.data_type = NUMPY_TYPE_TO_DATA_TYPE[ndarray.dtype] - - def astype(self, data_type: DataType) -> UnquantizedTensor: - dtype = data_type.dtype - if self.data_type == DT_BF16: - self.ndarray = bf16_to_fp32(self.ndarray) - return UnquantizedTensor(self.ndarray.astype(dtype)) - - def to_ggml(self) -> Self: - return self - - def permute_part(self, n_part: int, n_head: int, n_head_kv: int) -> UnquantizedTensor: - r = self.ndarray.shape[0] // 3 - return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head, n_head_kv)) - - def part(self, n_part: int) -> UnquantizedTensor: - r = self.ndarray.shape[0] // 3 - return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...]) - - def permute(self, n_head: int, n_head_kv: int) -> UnquantizedTensor: - return UnquantizedTensor(permute(self.ndarray, n_head, n_head_kv)) - - -def load_unquantized(lazy_tensor: LazyTensor, expected_dtype: Any = None, convert: bool = False) -> NDArray: - tensor = lazy_tensor.load() - assert isinstance(tensor, UnquantizedTensor) - - # double-check: - actual_shape = list(tensor.ndarray.shape) - assert actual_shape == lazy_tensor.shape, (actual_shape, lazy_tensor.shape) - if expected_dtype is not None and expected_dtype != tensor.ndarray.dtype: - if convert: - tensor.ndarray = tensor.ndarray.astype(expected_dtype) - else: - raise ValueError(f'expected this tensor to have dtype {expected_dtype}, got 
{tensor.ndarray.dtype}') - - return tensor.ndarray - - -GGMLCompatibleTensor = UnquantizedTensor - - -@dataclass -class LazyTensor: - _load: Callable[[], Tensor] - shape: list[int] - data_type: DataType - description: str - - def load(self) -> Tensor: - ret = self._load() - # Should be okay if it maps to the same numpy type? - assert ret.data_type == self.data_type or (self.data_type.dtype == ret.data_type.dtype), \ - (self.data_type, ret.data_type, self.description) - return ret - - def astype(self, data_type: DataType) -> LazyTensor: - self.validate_conversion_to(data_type) - - def load() -> Tensor: - return self.load().astype(data_type) - return LazyTensor(load, self.shape, data_type, f'convert({data_type}) {self.description}') - - def validate_conversion_to(self, data_type: DataType) -> None: - if data_type != self.data_type and data_type.name not in self.data_type.valid_conversions: - raise ValueError(f'Cannot validate conversion from {self.data_type} to {data_type}.') - - -LazyModel: TypeAlias = 'dict[str, LazyTensor]' - -ModelFormat: TypeAlias = Literal['ggml', 'torch', 'safetensors', 'none'] - -@dataclass -class ModelPlus: - model: LazyModel - paths: list[Path] # Where this was read from. - format: ModelFormat - vocab: BaseVocab | None # For GGML models (which have vocab built in), the vocab. - - -def merge_sharded(models: list[LazyModel]) -> LazyModel: - # Original LLaMA models have each file contain one part of each tensor. - # Use a dict instead of a set to preserve order. - names = {name: None for model in models for name in model} - - def convert(name: str) -> LazyTensor: - lazy_tensors = [model[name] for model in models] - if len(lazy_tensors) == 1: - # only one file; don't go through this procedure since there might - # be quantized tensors - return lazy_tensors[0] - if len(lazy_tensors[0].shape) == 1: - # the tensor is just duplicated in every file - return lazy_tensors[0] - if name.startswith('tok_embeddings.') or \ - name.endswith('.attention.wo.weight') or \ - name.endswith('.feed_forward.w2.weight'): - # split by columns - axis = 1 - else: - # split by rows - axis = 0 - concatenated_shape = list(lazy_tensors[0].shape) - concatenated_shape[axis] = sum(tensor.shape[axis] for tensor in lazy_tensors) - - def load() -> UnquantizedTensor: - ndarrays = [load_unquantized(tensor) for tensor in lazy_tensors] - concatenated = np.concatenate(ndarrays, axis=axis) - return UnquantizedTensor(concatenated) - description = 'concatenated[[' + '] | ['.join(lt.description for lt in lazy_tensors) + ']]' - return LazyTensor(load, concatenated_shape, lazy_tensors[0].data_type, description) - return {name: convert(name) for name in names} - - -def merge_multifile_models(models_plus: list[ModelPlus]) -> ModelPlus: - formats: set[ModelFormat] = set(mp.format for mp in models_plus) - assert len(formats) == 1, "different formats?" - format = formats.pop() - paths = [path for mp in models_plus for path in mp.paths] - # Use the first non-None vocab, if any. - try: - vocab = next(mp.vocab for mp in models_plus if mp.vocab is not None) - except StopIteration: - vocab = None - - if any("model.embed_tokens.weight" in mp.model for mp in models_plus): - # Transformers models put different tensors in different files, but - # don't split individual tensors between files. 
- model: LazyModel = {} - for mp in models_plus: - model.update(mp.model) - else: - model = merge_sharded([mp.model for mp in models_plus]) - - return ModelPlus(model, paths, format, vocab) - - -def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_head_kv: int) -> LazyTensor: - def load() -> Tensor: - return lazy_tensor.load().permute(n_head, n_head_kv) - return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description) - - -def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int, n_head_kv: int) -> LazyTensor: - def load() -> Tensor: - return lazy_tensor.load().permute_part(n_part, n_head, n_head_kv) - s = lazy_tensor.shape.copy() - s[0] = s[0] // 3 - return LazyTensor(load, s, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description) - - -def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor: - def load() -> Tensor: - return lazy_tensor.load().part(n_part) - s = lazy_tensor.shape.copy() - s[0] = s[0] // 3 - return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description) - - -def pack_experts_lazy(lazy_tensors: list[LazyTensor]) -> LazyTensor: - def load() -> Tensor: - tensors = [lazy_tensor.load() for lazy_tensor in lazy_tensors] - return UnquantizedTensor(np.array([tensor.ndarray for tensor in tensors])) - s = lazy_tensors[0].shape.copy() - s.insert(0, len(lazy_tensors)) - return LazyTensor(load, s, lazy_tensors[0].data_type, 'pack_experts ' + ' | '.join(lt.description for lt in lazy_tensors)) - - -# Functionality that simulates `torch.load` but where individual tensors are -# only loaded into memory on demand, not all at once. -# PyTorch can't do this natively as of time of writing: -# - https://github.com/pytorch/pytorch/issues/64327 -# This allows us to de-shard without multiplying RAM usage, and also -# conveniently drops the PyTorch dependency (though we still need numpy). 
- - -@dataclass -class LazyStorageKind: - data_type: DataType - - -@dataclass -class LazyStorage: - load: Callable[[int, int], NDArray] - kind: LazyStorageKind - description: str - - -class LazyUnpickler(pickle.Unpickler): - def __init__(self, fp: IO[bytes], data_base_path: str, zip_file: zipfile.ZipFile): - super().__init__(fp) - self.data_base_path = data_base_path - self.zip_file = zip_file - - def persistent_load(self, pid: Any) -> Any: - assert pid[0] == 'storage' - assert isinstance(pid[1], LazyStorageKind) - data_type = pid[1].data_type - filename_stem = pid[2] - filename = f'{self.data_base_path}/{filename_stem}' - info = self.zip_file.getinfo(filename) - - def load(offset: int, elm_count: int) -> NDArray: - dtype = data_type.dtype - with self.zip_file.open(info) as fp: - fp.seek(offset * dtype.itemsize) - size = elm_count * dtype.itemsize - data = fp.read(size) - assert len(data) == size - return np.frombuffer(data, dtype) - description = f'storage data_type={data_type} path-in-zip={filename} path={self.zip_file.filename}' - return LazyStorage(load=load, kind=pid[1], description=description) - - @staticmethod - def lazy_rebuild_tensor_v2(storage: Any, storage_offset: Any, size: Any, stride: Any, - requires_grad: Any, backward_hooks: Any, metadata: Any = None) -> LazyTensor: - assert isinstance(storage, LazyStorage) - - def load() -> UnquantizedTensor: - elm_count = stride[0] * size[0] - return UnquantizedTensor(storage.load(storage_offset, elm_count).reshape(size)) - description = f'pickled storage_offset={storage_offset} in {storage.description}' - return LazyTensor(load, list(size), storage.kind.data_type, description) - - @staticmethod - def rebuild_from_type_v2(func, new_type, args, state): - return func(*args) - - CLASSES: dict[tuple[str, str], type[LazyTensor] | LazyStorageKind] = { - # getattr used here as a workaround for mypy not being smart enough to determine - # the staticmethods have a __func__ attribute. 
- ('torch._tensor', '_rebuild_from_type_v2'): getattr(rebuild_from_type_v2, '__func__'), - ('torch._utils', '_rebuild_tensor_v2'): getattr(lazy_rebuild_tensor_v2, '__func__'), - ('torch', 'BFloat16Storage'): LazyStorageKind(DT_BF16), - ('torch', 'HalfStorage'): LazyStorageKind(DT_F16), - ('torch', 'FloatStorage'): LazyStorageKind(DT_F32), - ('torch', 'IntStorage'): LazyStorageKind(DT_I32), - ('torch', 'Tensor'): LazyTensor, - } - - def find_class(self, module: str, name: str) -> Any: - if not module.startswith('torch'): - return super().find_class(module, name) - return self.CLASSES[(module, name)] - - -def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus: - zf = zipfile.ZipFile(outer_fp) - pickle_paths = [name for name in zf.namelist() if name.endswith('.pkl')] - assert len(pickle_paths) == 1, pickle_paths - pickle_fp = zf.open(pickle_paths[0], 'r') - unpickler = LazyUnpickler(pickle_fp, - data_base_path=pickle_paths[0][:-4], - zip_file=zf) - model = unpickler.load() - if 'model' in model: model = model['model'] - as_dict = dict(model.items()) - return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None) - - -def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus: - header_size, = struct.unpack(' LazyTensor: - data_type = SAFETENSORS_DATA_TYPES[info['dtype']] - numpy_dtype = data_type.dtype - shape: list[int] = info['shape'] - begin, end = info['data_offsets'] - assert 0 <= begin <= end <= len(byte_buf) - assert end - begin == math.prod(shape) * numpy_dtype.itemsize - buf = byte_buf[begin:end] - - def load() -> UnquantizedTensor: - return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape)) - description = f'safetensors begin={begin} end={end} type={data_type} path={path}' - return LazyTensor(load, shape, data_type, description) - model = {name: convert(info) for (name, info) in header.items() if name != '__metadata__'} - return ModelPlus(model=model, paths=[path], format='safetensors', vocab=None) - - -def must_read(fp: IO[bytes], length: int) -> bytes: - ret = fp.read(length) - if len(ret) < length: - raise EOFError("unexpectedly reached end of file") - return ret - - -@functools.lru_cache(maxsize=None) -def lazy_load_file(path: Path) -> ModelPlus: - fp = open(path, 'rb') - first8 = fp.read(8) - fp.seek(0) - if first8[:2] == b'PK': - # A zip file, i.e. PyTorch format - return lazy_load_torch_file(fp, path) - elif struct.unpack(' Iterable[Out]: - '''Parallel map, but with backpressure. If the caller doesn't call `next` - fast enough, this will stop calling `func` at some point rather than - letting results pile up in memory. Specifically, there is a max of one - output value buffered per thread.''' - if concurrency < 2: - yield from map(func, iterable) - # Not reached. 
- iterable = iter(iterable) - executor_class: type[ThreadPoolExecutor] | type[ProcessPoolExecutor] - if use_processpool_executor: - executor_class = ProcessPoolExecutor - else: - executor_class = ThreadPoolExecutor - with executor_class(max_workers=max_workers) as executor: - futures: list[concurrent.futures.Future[Out]] = [] - done = False - for _ in range(concurrency): - try: - futures.append(executor.submit(func, next(iterable))) - except StopIteration: - done = True - break - - while futures: - result = futures.pop(0).result() - while not done and len(futures) < concurrency: - try: - futures.append(executor.submit(func, next(iterable))) - except StopIteration: - done = True - break - yield result - - -def check_vocab_size(params: Params, vocab: BaseVocab, pad_vocab: bool = False) -> None: - # Handle special case where the model's vocab size is not set - if params.n_vocab == -1: - raise ValueError( - "The model's vocab size is set to -1 in params.json. Please update it manually." - + (f" Maybe {vocab.vocab_size}?" if isinstance(vocab, Vocab) else ""), - ) - if not isinstance(vocab, Vocab): - return # model has no vocab - - # Check for a vocab size mismatch - if params.n_vocab == vocab.vocab_size: - logger.warning("Ignoring added_tokens.json since model matches vocab size without it.") - return - - if pad_vocab and params.n_vocab > vocab.vocab_size: - pad_count = params.n_vocab - vocab.vocab_size - logger.debug( - f"Padding vocab with {pad_count} token(s) - through " - ) - for i in range(1, pad_count + 1): - vocab.added_tokens_dict[f""] = -1 - vocab.added_tokens_list.append(f"") - vocab.vocab_size = params.n_vocab - return - - msg = f"Vocab size mismatch (model has {params.n_vocab}, but {vocab.fname_tokenizer} has {vocab.vocab_size})." - if vocab.vocab_size < params.n_vocab < vocab.vocab_size + 20: - msg += f" Most likely you are missing added_tokens.json (should be in {vocab.fname_tokenizer.parent})." - if vocab.vocab_size < params.n_vocab: - msg += " Add the --pad-vocab option and try again." 
- - raise ValueError(msg) - - -class OutputFile: - def __init__(self, fname_out: Path, endianess:gguf.GGUFEndian = gguf.GGUFEndian.LITTLE): - self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH], endianess=endianess) - - def add_meta_model(self, params: Params, metadata: gguf.Metadata | None) -> None: - # Metadata About The Model And Its Provenence - name = "LLaMA" - if metadata is not None and metadata.name is not None: - name = metadata.name - elif params.path_model is not None: - name = params.path_model.name - elif params.n_ctx == 4096: - # Heuristic detection of LLaMA v2 model - name = "LLaMA v2" - - self.gguf.add_name(name) - - if metadata is not None: - if metadata.author is not None: - self.gguf.add_author(metadata.author) - if metadata.version is not None: - self.gguf.add_version(metadata.version) - if metadata.organization is not None: - self.gguf.add_organization(metadata.organization) - - if metadata.finetune is not None: - self.gguf.add_finetune(metadata.finetune) - if metadata.basename is not None: - self.gguf.add_basename(metadata.basename) - - if metadata.description is not None: - self.gguf.add_description(metadata.description) - if metadata.quantized_by is not None: - self.gguf.add_quantized_by(metadata.quantized_by) - - if metadata.size_label is not None: - self.gguf.add_size_label(metadata.size_label) - - if metadata.license is not None: - self.gguf.add_license(metadata.license) - if metadata.license_name is not None: - self.gguf.add_license_name(metadata.license_name) - if metadata.license_link is not None: - self.gguf.add_license_link(metadata.license_link) - - if metadata.url is not None: - self.gguf.add_url(metadata.url) - if metadata.doi is not None: - self.gguf.add_doi(metadata.doi) - if metadata.uuid is not None: - self.gguf.add_uuid(metadata.uuid) - if metadata.repo_url is not None: - self.gguf.add_repo_url(metadata.repo_url) - - if metadata.source_url is not None: - self.gguf.add_source_url(metadata.source_url) - if metadata.source_doi is not None: - self.gguf.add_source_doi(metadata.source_doi) - if metadata.source_uuid is not None: - self.gguf.add_source_uuid(metadata.source_uuid) - if metadata.source_repo_url is not None: - self.gguf.add_source_repo_url(metadata.source_repo_url) - - if metadata.base_models is not None: - self.gguf.add_base_model_count(len(metadata.base_models)) - for key, base_model_entry in enumerate(metadata.base_models): - if "name" in base_model_entry: - self.gguf.add_base_model_name(key, base_model_entry["name"]) - if "author" in base_model_entry: - self.gguf.add_base_model_author(key, base_model_entry["author"]) - if "version" in base_model_entry: - self.gguf.add_base_model_version(key, base_model_entry["version"]) - if "organization" in base_model_entry: - self.gguf.add_base_model_organization(key, base_model_entry["organization"]) - if "url" in base_model_entry: - self.gguf.add_base_model_url(key, base_model_entry["url"]) - if "doi" in base_model_entry: - self.gguf.add_base_model_doi(key, base_model_entry["doi"]) - if "uuid" in base_model_entry: - self.gguf.add_base_model_uuid(key, base_model_entry["uuid"]) - if "repo_url" in base_model_entry: - self.gguf.add_base_model_repo_url(key, base_model_entry["repo_url"]) - - if metadata.tags is not None: - self.gguf.add_tags(metadata.tags) - if metadata.languages is not None: - self.gguf.add_languages(metadata.languages) - if metadata.datasets is not None: - self.gguf.add_datasets(metadata.datasets) - - def add_meta_arch(self, params: Params) -> None: - # Metadata About The 
Neural Architecture Itself - self.gguf.add_vocab_size(params.n_vocab) - self.gguf.add_context_length(params.n_ctx) - self.gguf.add_embedding_length(params.n_embd) - self.gguf.add_block_count(params.n_layer) - self.gguf.add_feed_forward_length(params.n_ff) - self.gguf.add_rope_dimension_count(params.n_embd // params.n_head) - self.gguf.add_head_count (params.n_head) - self.gguf.add_head_count_kv (params.n_head_kv) - - if params.n_experts: - self.gguf.add_expert_count(params.n_experts) - - if params.n_experts_used: - self.gguf.add_expert_used_count(params.n_experts_used) - - if params.f_norm_eps: - self.gguf.add_layer_norm_rms_eps(params.f_norm_eps) - else: - raise ValueError('f_norm_eps is None') - - if params.f_rope_freq_base is not None: - self.gguf.add_rope_freq_base(params.f_rope_freq_base) - - if params.rope_scaling_type: - assert params.f_rope_scale is not None - self.gguf.add_rope_scaling_type(params.rope_scaling_type) - self.gguf.add_rope_scaling_factor(params.f_rope_scale) - - if params.n_ctx_orig is not None: - self.gguf.add_rope_scaling_orig_ctx_len(params.n_ctx_orig) - - if params.rope_finetuned is not None: - self.gguf.add_rope_scaling_finetuned(params.rope_finetuned) - - if params.ftype is not None: - self.gguf.add_file_type(params.ftype) - - def extract_vocabulary_from_model(self, vocab: Vocab) -> tuple[list[bytes], list[float], list[gguf.TokenType]]: - tokens = [] - scores = [] - toktypes = [] - - # NOTE: `all_tokens` returns the base vocabulary and added tokens - for text, score, toktype in vocab.all_tokens(): - tokens.append(text) - scores.append(score) - toktypes.append(toktype) - - assert len(tokens) == vocab.vocab_size - - return tokens, scores, toktypes - - def add_meta_vocab(self, vocab: Vocab) -> None: - # Ensure that tokenizer_model is added to the GGUF model - self.gguf.add_tokenizer_model(vocab.tokenizer_model) - - # Extract model vocabulary for model conversion - tokens, scores, toktypes = self.extract_vocabulary_from_model(vocab) - - # Add extracted token information for model conversion - self.gguf.add_token_list(tokens) - self.gguf.add_token_scores(scores) - self.gguf.add_token_types(toktypes) - - def add_meta_special_vocab(self, svocab: gguf.SpecialVocab) -> None: - svocab.add_to_gguf(self.gguf) - - def add_tensor_info(self, name: str, tensor: LazyTensor) -> None: - n_elements = int(np.prod(tensor.shape)) - raw_dtype = getattr(tensor.data_type, 'ggml_type', None) - data_type = getattr(tensor.data_type, 'quantized_type', None) or tensor.data_type.dtype - data_nbytes = tensor.data_type.elements_to_bytes(n_elements) - self.gguf.add_tensor_info(name, tensor.shape, data_type, data_nbytes, raw_dtype=raw_dtype) - - def write_meta(self) -> None: - self.gguf.write_header_to_file() - self.gguf.write_kv_data_to_file() - - def write_tensor_info(self) -> None: - self.gguf.write_ti_data_to_file() - - def write_tensor_data(self, ftype: GGMLFileType, model: LazyModel, concurrency: int) -> None: - ndarrays_inner = bounded_parallel_map(OutputFile.do_item, model.items(), concurrency=concurrency) - if ftype == GGMLFileType.MostlyQ8_0: - ndarrays = bounded_parallel_map( - OutputFile.maybe_do_quantize, ndarrays_inner, concurrency=concurrency, max_workers=concurrency, - use_processpool_executor=True, - ) - else: - ndarrays = map(OutputFile.maybe_do_quantize, ndarrays_inner) - - start = time.time() - for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)): - elapsed = time.time() - start - size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape) - padi = 
len(str(len(model))) - logger.info( - f"[{i + 1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type.name:4} | T+{int(elapsed):4}" - ) - self.gguf.write_tensor_data(ndarray) - - def close(self) -> None: - self.gguf.close() - - @staticmethod - def write_vocab_only( - fname_out: Path, params: Params, vocab: Vocab, svocab: gguf.SpecialVocab, - endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, metadata: gguf.Metadata | None = None, - ) -> None: - check_vocab_size(params, vocab, pad_vocab=pad_vocab) - - of = OutputFile(fname_out, endianess=endianess) - - # meta data - of.add_meta_model(params, metadata) - of.add_meta_arch(params) - of.add_meta_vocab(vocab) - of.add_meta_special_vocab(svocab) - - of.write_meta() - - of.close() - - @staticmethod - def do_item(item: tuple[str, LazyTensor]) -> tuple[DataType, NDArray]: - name, lazy_tensor = item - tensor = lazy_tensor.load().to_ggml() - return (lazy_tensor.data_type, tensor.ndarray) - - @staticmethod - def maybe_do_quantize(item: tuple[DataType, NDArray]) -> NDArray: - dt, arr = item - if not isinstance(dt, QuantizedDataType): - return arr - return dt.quantize(arr) - - @staticmethod - def write_all( - fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: BaseVocab, svocab: gguf.SpecialVocab, - concurrency: int = DEFAULT_CONCURRENCY, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, - pad_vocab: bool = False, - metadata: gguf.Metadata | None = None, - ) -> None: - check_vocab_size(params, vocab, pad_vocab=pad_vocab) - - of = OutputFile(fname_out, endianess=endianess) - - # meta data - of.add_meta_model(params, metadata) - of.add_meta_arch(params) - if isinstance(vocab, Vocab): - of.add_meta_vocab(vocab) - of.add_meta_special_vocab(svocab) - else: # NoVocab - of.gguf.add_tokenizer_model(vocab.tokenizer_model) - - # tensor info - for name, lazy_tensor in model.items(): - of.add_tensor_info(name, lazy_tensor) - - of.write_meta() - of.write_tensor_info() - - # tensor data - of.write_tensor_data(ftype, model, concurrency) - - of.close() - - -def pick_output_type(model: LazyModel, output_type_str: str | None) -> GGMLFileType: - wq_type = model[gguf.TENSOR_NAMES[gguf.MODEL_TENSOR.ATTN_Q].format(bid=0) + ".weight"].data_type - - if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)): - return GGMLFileType.AllF32 - if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16): - return GGMLFileType.MostlyF16 - if output_type_str == "q8_0": - return GGMLFileType.MostlyQ8_0 - - name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()} - - raise ValueError(f"Unexpected combination of types: {name_to_type}") - - -def per_model_weight_count_estimation(tensors: Iterable[tuple[str, LazyTensor]]) -> tuple[int, int, int]: - total_params = 0 - shared_params = 0 - expert_params = 0 - - for name, lazy_tensor in tensors: - # We don't need these - if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")): - continue - - # Got A Tensor - sum_weights_in_tensor: int = 1 - - # Tensor Volume - for dim in lazy_tensor.shape: - sum_weights_in_tensor *= dim - - if ".experts." in name: - if ".experts.0." 
in name: - expert_params += sum_weights_in_tensor - else: - shared_params += sum_weights_in_tensor - - total_params += sum_weights_in_tensor - - return total_params, shared_params, expert_params - - -def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel: - return {name: tensor.astype(output_type.type_for_tensor(name, tensor)) - for (name, tensor) in model.items()} - - -def convert_model_names(model: LazyModel, params: Params, skip_unknown: bool) -> LazyModel: - tmap = gguf.TensorNameMap(ARCH, params.n_layer) - should_skip = set(gguf.MODEL_TENSOR_SKIP.get(ARCH, [])) - - tmp = model - - # merge experts into one tensor - if params.n_experts and params.n_experts > 0: - for i_l in range(params.n_layer): - for w in range(1, 4): - experts = [] - for e in range(params.n_experts): - if f"layers.{i_l}.feed_forward.experts.{e}.w{w}.weight" in model: - experts.append(model[f"layers.{i_l}.feed_forward.experts.{e}.w{w}.weight"]) - del tmp[f"layers.{i_l}.feed_forward.experts.{e}.w{w}.weight"] - elif f"model.layers.{i_l}.block_sparse_moe.experts.{e}.w{w}.weight" in model: - experts.append(model[f"model.layers.{i_l}.block_sparse_moe.experts.{e}.w{w}.weight"]) - del tmp[f"model.layers.{i_l}.block_sparse_moe.experts.{e}.w{w}.weight"] - else: - raise ValueError(f"Expert tensor not found: layers.{i_l}.feed_forward.experts.{e}.w{w}.weight") - tmp[f"layers.{i_l}.feed_forward.experts.w{w}.weight"] = pack_experts_lazy(experts) - - # HF models permut or pack some of the tensors, so we need to undo that - for i in itertools.count(): - if f"model.layers.{i}.self_attn.q_proj.weight" in model: - logger.debug(f"Permuting layer {i}") - tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head, params.n_head) - tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_head_kv) - # tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"] - elif f"model.layers.{i}.self_attn.W_pack.weight" in model: - logger.debug(f"Unpacking and permuting layer {i}") - tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head) - tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv) - tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2) - del tmp[f"model.layers.{i}.self_attn.W_pack.weight"] - else: - break - - out: LazyModel = {} - for name, lazy_tensor in model.items(): - tensor_type, name_new = tmap.get_type_and_name(name, try_suffixes = (".weight", ".bias")) or (None, None) - if name_new is None: - if skip_unknown: - logger.warning(f"Unexpected tensor name: {name} - skipping") - continue - raise ValueError(f"Unexpected tensor name: {name}. Use --skip-unknown to ignore it (e.g. LLaVA)") - - if tensor_type in should_skip: - logger.debug(f"skipping tensor {name_new}") - continue - - logger.debug(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type.name:6s} | {lazy_tensor.shape}") - out[name_new] = lazy_tensor - - return out - - -def nth_multifile_path(path: Path, n: int) -> Path | None: - '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return - the nth path in the model. 
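Editor's note: the q/k permutation applied in `convert_model_names` above (via `permute_lazy` / `permute_part_lazy`) regroups attention rows so that Hugging Face's interleaved rotary layout matches what the GGUF consumer expects. The actual helper lives elsewhere in this script; the sketch below reproduces that kind of row regrouping from memory on a toy array and should be read as an illustration only.

```python
from __future__ import annotations

import numpy as np

def permute(weights: np.ndarray, n_head: int, n_head_kv: int | None) -> np.ndarray:
    # Hedged sketch: regroup rows per head (reshape -> swapaxes -> reshape) to undo
    # the interleaved rotary-embedding ordering used by HF checkpoints.
    if n_head_kv is not None and n_head != n_head_kv:
        n_head = n_head_kv
    return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
                   .swapaxes(1, 2)
                   .reshape(weights.shape))

if __name__ == "__main__":
    w = np.arange(8 * 4).reshape(8, 4)              # toy weight: 2 heads, 4 rows each
    print(permute(w, n_head=2, n_head_kv=2).shape)  # (8, 4) -- same shape, rows regrouped
```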
- ''' - # Support the following patterns: - patterns = [ - # - x.00.pth, x.01.pth, etc. - (r'\.[0-9]{2}\.pth$', f'.{n:02}.pth'), - # - x-00001-of-00002.bin, x-00002-of-00002.bin, etc. - (r'-[0-9]{5}-of-(.*)$', fr'-{n:05}-of-\1'), - # x.bin, x.bin.1, etc. - (r'(\.[0-9]+)?$', r'\1' if n == 0 else fr'\1.{n}') - ] - for regex, replacement in patterns: - if re.search(regex, path.name): - new_path = path.with_name(re.sub(regex, replacement, path.name)) - if new_path.exists(): - return new_path - return None - - -def find_multifile_paths(path: Path) -> list[Path]: - '''Given any path belonging to a multi-file model (e.g. foo.bin.1), return - the whole list of paths in the model. - ''' - ret: list[Path] = [] - for i in itertools.count(): - nth_path = nth_multifile_path(path, i) - if nth_path is None: - break - ret.append(nth_path) - if not ret: - # No matches. This should only happen if the file was named, e.g., - # foo.0, and there was no file named foo. Oh well, try to process it - # as a single file. - return [path] - return ret - - -def load_some_model(path: Path) -> ModelPlus: - '''Load a model of any supported format.''' - # Be extra-friendly and accept either a file or a directory: - if path.is_dir(): - # Check if it's a set of safetensors files first - globs = ["model-00001-of-*.safetensors", "model.safetensors", "consolidated.safetensors"] - files = [file for glob in globs for file in path.glob(glob)] - if not files: - # Try the PyTorch patterns too, with lower priority - globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"] - files = [file for glob in globs for file in path.glob(glob)] - if not files: - raise FileNotFoundError(f"Can't find model in directory {path}") - if len(files) > 1: - raise ValueError(f"Found multiple models in {path}, not sure which to pick: {files}") - path = files[0] - - paths = find_multifile_paths(path) - models_plus: list[ModelPlus] = [] - for path in paths: - logger.info(f"Loading model file {path}") - models_plus.append(lazy_load_file(path)) - - model_plus = merge_multifile_models(models_plus) - return model_plus - - -class VocabFactory: - _VOCAB_CLASSES: list[type[Vocab]] = [SentencePieceVocab, BpeVocab, LlamaHfVocab] - - def __init__(self, path: Path): - self.path = path - - def _create_special_vocab(self, vocab: BaseVocab, model_parent_path: Path) -> gguf.SpecialVocab: - load_merges = vocab.name == "bpe" - n_vocab = vocab.vocab_size if isinstance(vocab, Vocab) else None - return gguf.SpecialVocab( - model_parent_path, - load_merges=load_merges, - special_token_types=None, # Predetermined or passed as a parameter - n_vocab=n_vocab, - ) - - def _create_vocab_by_path(self, vocab_types: list[str]) -> Vocab: - vocab_classes: dict[str, type[Vocab]] = {cls.name: cls for cls in self._VOCAB_CLASSES} - selected_vocabs: dict[str, type[Vocab]] = {} - for vtype in vocab_types: - try: - selected_vocabs[vtype] = vocab_classes[vtype] - except KeyError: - raise ValueError(f"Unsupported vocabulary type {vtype}") from None - - for vtype, cls in selected_vocabs.items(): - try: - vocab = cls(self.path) - break - except FileNotFoundError: - pass # ignore unavailable tokenizers - else: - raise FileNotFoundError(f"Could not find a tokenizer matching any of {vocab_types}") - - logger.info(f"Loaded vocab file {vocab.fname_tokenizer!r}, type {vocab.name!r}") - return vocab - - def load_vocab(self, vocab_types: list[str] | None, model_parent_path: Path) -> tuple[BaseVocab, gguf.SpecialVocab]: - vocab: BaseVocab - if vocab_types is None: - vocab 
= NoVocab() - else: - vocab = self._create_vocab_by_path(vocab_types) - # FIXME: Respect --vocab-dir? - special_vocab = self._create_special_vocab( - vocab, - model_parent_path, - ) - return vocab, special_vocab - - -def default_convention_outfile(file_type: GGMLFileType, expert_count: int | None, model_params_count: tuple[int, int, int], metadata: gguf.Metadata) -> str: - name = metadata.name if metadata.name is not None else None - basename = metadata.basename if metadata.basename is not None else None - finetune = metadata.finetune if metadata.finetune is not None else None - version = metadata.version if metadata.version is not None else None - size_label = metadata.size_label if metadata.size_label is not None else gguf.size_label(*model_params_count, expert_count=expert_count or 0) - - output_type = { - GGMLFileType.AllF32: "F32", - GGMLFileType.MostlyF16: "F16", - GGMLFileType.MostlyQ8_0: "Q8_0", - }[file_type] - - return gguf.naming_convention(name, basename, finetune, version, size_label, output_type) - - -def default_outfile(model_paths: list[Path], file_type: GGMLFileType, expert_count: int | None, model_params_count: tuple[int, int, int], metadata: gguf.Metadata) -> Path: - default_filename = default_convention_outfile(file_type, expert_count, model_params_count, metadata) - ret = model_paths[0].parent / f"{default_filename}.gguf" - if ret in model_paths: - logger.error( - f"Error: Default output path ({ret}) would overwrite the input. " - "Please explicitly specify a path using --outfile.") - sys.exit(1) - return ret - - -def do_dump_model(model_plus: ModelPlus) -> None: - print(f"model_plus.paths = {model_plus.paths!r}") # noqa: NP100 - print(f"model_plus.format = {model_plus.format!r}") # noqa: NP100 - print(f"model_plus.vocab = {model_plus.vocab!r}") # noqa: NP100 - for name, lazy_tensor in model_plus.model.items(): - print(f"{name}: shape={lazy_tensor.shape} type={lazy_tensor.data_type}; {lazy_tensor.description}") # noqa: NP100 - - -def main(args_in: list[str] | None = None) -> None: - output_choices = ["f32", "f16"] - if np.uint32(1) == np.uint32(1).newbyteorder("<"): - # We currently only support Q8_0 output on little endian systems. 
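Editor's note: the numpy byte-order probe in `main()` here only decides whether q8_0 output is offered on this host. A stdlib-only equivalent of that gate (an illustration, not the script's actual check) would be:

```python
import sys

output_choices = ["f32", "f16"]
if sys.byteorder == "little":
    # q8_0 quantization during conversion is only supported on little-endian hosts
    output_choices.append("q8_0")
print(output_choices)
```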
- output_choices.append("q8_0") - parser = argparse.ArgumentParser(description="Convert a LLaMA model to a GGML compatible file") - parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model") - parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file") - parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab") - parser.add_argument("--no-vocab", action="store_true", help="store model without the vocab") - parser.add_argument("--outtype", choices=output_choices, help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)") - parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file") - parser.add_argument("--vocab-type", help="vocab types to try in order, choose from 'spm', 'bpe', 'hfft' (default: spm,hfft)", default="spm,hfft") - parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input") - parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)") - parser.add_argument("--ctx", type=int, help="model training context (default: based on input)") - parser.add_argument("--concurrency", type=int, help=f"concurrency used for conversion (default: {DEFAULT_CONCURRENCY})", default=DEFAULT_CONCURRENCY) - parser.add_argument("--big-endian", action="store_true", help="model is executed on big endian machine") - parser.add_argument("--pad-vocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides") - parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing") - parser.add_argument("--verbose", action="store_true", help="increase output verbosity") - parser.add_argument("--metadata", type=Path, help="Specify the path for an authorship metadata override file") - parser.add_argument("--get-outfile", action="store_true", help="get calculated default outfile name") - parser.add_argument("--model-name", type=str, default=None, help="name of the model") - - args = parser.parse_args(args_in) - - if args.verbose: - logging.basicConfig(level=logging.DEBUG) - elif args.dump_single or args.dump or args.get_outfile: - # Avoid printing anything besides the dump output - logging.basicConfig(level=logging.WARNING) - else: - logging.basicConfig(level=logging.INFO) - - model_name = args.model_name - dir_model = args.model - - metadata = gguf.Metadata.load(args.metadata, dir_model, model_name) - - if args.get_outfile: - model_plus = load_some_model(dir_model) - params = Params.load(model_plus) - model = convert_model_names(model_plus.model, params, args.skip_unknown) - model_params_count = per_model_weight_count_estimation(model_plus.model.items()) - ftype = pick_output_type(model, args.outtype) - - if (metadata is None or metadata.name is None) and params.path_model is not None: - metadata.name = params.path_model.name - - print(f"{default_convention_outfile(ftype, params.n_experts, model_params_count, metadata)}") # noqa: NP100 - return - - if args.no_vocab and args.vocab_only: - raise ValueError("--vocab-only does not make sense with --no-vocab") - - if args.dump_single: - model_plus = lazy_load_file(dir_model) - do_dump_model(model_plus) - return - - if not args.vocab_only: - model_plus = load_some_model(dir_model) - else: - model_plus = ModelPlus(model = {}, paths = [dir_model / 'dummy'], format 
= 'none', vocab = None) - - if args.dump: - do_dump_model(model_plus) - return - - endianess = gguf.GGUFEndian.LITTLE - if args.big_endian: - endianess = gguf.GGUFEndian.BIG - - params = None - if args.pad_vocab or not args.vocab_only: - params = Params.load(model_plus) - if params.n_ctx == -1: - if args.ctx is None: - msg = """\ - The model doesn't have a context size, and you didn't specify one with --ctx - Please specify one with --ctx: - - LLaMA v1: --ctx 2048 - - LLaMA v2: --ctx 4096""" - parser.error(textwrap.dedent(msg)) - params.n_ctx = args.ctx - - if args.outtype: - params.ftype = { - "f32": GGMLFileType.AllF32, - "f16": GGMLFileType.MostlyF16, - "q8_0": GGMLFileType.MostlyQ8_0, - }[args.outtype] - - logger.info(f"params = {params}") - - model_parent_path = model_plus.paths[0].parent - vocab_path = Path(args.vocab_dir or dir_model or model_parent_path) - vocab_factory = VocabFactory(vocab_path) - vocab_types = None if args.no_vocab else args.vocab_type.split(",") - vocab, special_vocab = vocab_factory.load_vocab(vocab_types, model_parent_path) - - if args.vocab_only: - assert isinstance(vocab, Vocab) - if not args.outfile: - raise ValueError("need --outfile if using --vocab-only") - outfile = args.outfile - if params is None: - params = Params( - n_vocab = vocab.vocab_size, - n_embd = 1, - n_layer = 1, - n_ctx = 1, - n_ff = 1, - n_head = 1, - n_head_kv = 1, - f_norm_eps = 1e-5, - ) - OutputFile.write_vocab_only(outfile, params, vocab, special_vocab, - endianess=endianess, pad_vocab=args.pad_vocab, metadata=metadata) - logger.info(f"Wrote {outfile}") - return - - if model_plus.vocab is not None and args.vocab_dir is None and not args.no_vocab: - vocab = model_plus.vocab - - assert params is not None - - if metadata.name is None and params.path_model is not None: - metadata.name = params.path_model.name - - model_params_count = per_model_weight_count_estimation(model_plus.model.items()) - logger.info(f"model parameters count : {model_params_count} ({gguf.model_weight_count_rounded_notation(model_params_count[0])})") - - logger.info(f"Vocab info: {vocab}") - logger.info(f"Special vocab info: {special_vocab}") - model = model_plus.model - model = convert_model_names(model, params, args.skip_unknown) - ftype = pick_output_type(model, args.outtype) - model = convert_to_output_type(model, ftype) - outfile = args.outfile or default_outfile(model_plus.paths, ftype, params.n_experts, model_params_count, metadata=metadata) - - metadata.size_label = gguf.size_label(*model_params_count, expert_count=params.n_experts or 0) - - params.ftype = ftype - logger.info(f"Writing {outfile}, format {ftype}") - - OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab, - concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab, metadata=metadata) - logger.info(f"Wrote {outfile}") - - -if __name__ == '__main__': - main() diff --git a/examples/json_schema_pydantic_example.py b/examples/json_schema_pydantic_example.py deleted file mode 100644 index 19c0bdb5b..000000000 --- a/examples/json_schema_pydantic_example.py +++ /dev/null @@ -1,82 +0,0 @@ -# Usage: -#! ./llama-server -m some-model.gguf & -#! pip install pydantic -#! 
python json_schema_pydantic_example.py - -from pydantic import BaseModel, Field, TypeAdapter -from annotated_types import MinLen -from typing import Annotated, List, Optional -import json, requests - -if True: - - def create_completion(*, response_model=None, endpoint="http://localhost:8080/v1/chat/completions", messages, **kwargs): - ''' - Creates a chat completion using an OpenAI-compatible endpoint w/ JSON schema support - (llama.cpp server, llama-cpp-python, Anyscale / Together...) - - The response_model param takes a type (+ supports Pydantic) and behaves just as w/ Instructor (see below) - ''' - response_format = None - type_adapter = None - - if response_model: - type_adapter = TypeAdapter(response_model) - schema = type_adapter.json_schema() - messages = [{ - "role": "system", - "content": f"You respond in JSON format with the following schema: {json.dumps(schema, indent=2)}" - }] + messages - response_format={"type": "json_object", "schema": schema} - - data = requests.post(endpoint, headers={"Content-Type": "application/json"}, - json=dict(messages=messages, response_format=response_format, **kwargs)).json() - if 'error' in data: - raise Exception(data['error']['message']) - - content = data["choices"][0]["message"]["content"] - return type_adapter.validate_json(content) if type_adapter else content - -else: - - # This alternative branch uses Instructor + OpenAI client lib. - # Instructor support streamed iterable responses, retry & more. - # (see https://python.useinstructor.com/) - #! pip install instructor openai - import instructor, openai - client = instructor.patch( - openai.OpenAI(api_key="123", base_url="http://localhost:8080"), - mode=instructor.Mode.JSON_SCHEMA) - create_completion = client.chat.completions.create - - -if __name__ == '__main__': - - class QAPair(BaseModel): - class Config: - extra = 'forbid' # triggers additionalProperties: false in the JSON schema - question: str - concise_answer: str - justification: str - stars: Annotated[int, Field(ge=1, le=5)] - - class PyramidalSummary(BaseModel): - class Config: - extra = 'forbid' # triggers additionalProperties: false in the JSON schema - title: str - summary: str - question_answers: Annotated[List[QAPair], MinLen(2)] - sub_sections: Optional[Annotated[List['PyramidalSummary'], MinLen(2)]] - - print("# Summary\n", create_completion( - model="...", - response_model=PyramidalSummary, - messages=[{ - "role": "user", - "content": f""" - You are a highly efficient corporate document summarizer. - Create a pyramidal summary of an imaginary internal document about our company processes - (starting high-level, going down to each sub sections). - Keep questions short, and answers even shorter (trivia / quizz style). - """ - }])) diff --git a/examples/json_schema_to_grammar.py b/examples/json_schema_to_grammar.py deleted file mode 100755 index a8779bf3b..000000000 --- a/examples/json_schema_to_grammar.py +++ /dev/null @@ -1,811 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import annotations - -import argparse -import itertools -import json -import re -import sys -from typing import Any, List, Optional, Set, Tuple, Union - -def _build_repetition(item_rule, min_items, max_items, separator_rule=None): - - if min_items == 0 and max_items == 1: - return f'{item_rule}?' 
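Editor's note: as `_build_repetition` (continued below) shows, JSON-schema occurrence bounds map directly onto GBNF repetition operators. A tiny self-contained sketch of that mapping for the no-separator case, with the outputs it produces (not the script's exact code):

```python
def repetition(item_rule: str, min_items: int, max_items: int | None) -> str:
    """Map (min, max) occurrence bounds to a GBNF repetition suffix."""
    if min_items == 0 and max_items == 1:
        return f"{item_rule}?"
    if max_items is None:
        if min_items == 0:
            return f"{item_rule}*"
        if min_items == 1:
            return f"{item_rule}+"
        return f"{item_rule}{{{min_items},}}"
    return f"{item_rule}{{{min_items},{max_items}}}"

for bounds in [(0, 1), (0, None), (1, None), (2, 4)]:
    print(bounds, "->", repetition("item", *bounds))
# (0, 1) -> item?   (0, None) -> item*   (1, None) -> item+   (2, 4) -> item{2,4}
```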
- - if not separator_rule: - if min_items == 1 and max_items is None: - return f'{item_rule}+' - elif min_items == 0 and max_items is None: - return f'{item_rule}*' - else: - return f'{item_rule}{{{min_items},{max_items if max_items is not None else ""}}}' - - result = item_rule + ' ' + _build_repetition(f'({separator_rule} {item_rule})', min_items - 1 if min_items > 0 else 0, max_items - 1 if max_items is not None else None) - return f'({result})?' if min_items == 0 else result - -def _generate_min_max_int(min_value: Optional[int], max_value: Optional[int], out: list, decimals_left: int = 16, top_level: bool = True): - has_min = min_value != None - has_max = max_value != None - - def digit_range(from_char: str, to_char: str): - out.append("[") - if from_char == to_char: - out.append(from_char) - else: - out.append(from_char) - out.append("-") - out.append(to_char) - out.append("]") - - def more_digits(min_digits: int, max_digits: int): - out.append("[0-9]") - if min_digits == max_digits and min_digits == 1: - return - out.append("{") - out.append(str(min_digits)) - if max_digits != min_digits: - out.append(",") - if max_digits != sys.maxsize: - out.append(str(max_digits)) - out.append("}") - - def uniform_range(from_str: str, to_str: str): - i = 0 - while i < len(from_str) and from_str[i] == to_str[i]: - i += 1 - if i > 0: - out.append("\"") - out.append(from_str[:i]) - out.append("\"") - if i < len(from_str): - if i > 0: - out.append(" ") - sub_len = len(from_str) - i - 1 - if sub_len > 0: - from_sub = from_str[i+1:] - to_sub = to_str[i+1:] - sub_zeros = "0" * sub_len - sub_nines = "9" * sub_len - - to_reached = False - out.append("(") - if from_sub == sub_zeros: - digit_range(from_str[i], chr(ord(to_str[i]) - 1)) - out.append(" ") - more_digits(sub_len, sub_len) - else: - out.append("[") - out.append(from_str[i]) - out.append("] ") - out.append("(") - uniform_range(from_sub, sub_nines) - out.append(")") - if ord(from_str[i]) < ord(to_str[i]) - 1: - out.append(" | ") - if to_sub == sub_nines: - digit_range(chr(ord(from_str[i]) + 1), to_str[i]) - to_reached = True - else: - digit_range(chr(ord(from_str[i]) + 1), chr(ord(to_str[i]) - 1)) - out.append(" ") - more_digits(sub_len, sub_len) - if not to_reached: - out.append(" | ") - digit_range(to_str[i], to_str[i]) - out.append(" ") - uniform_range(sub_zeros, to_sub) - out.append(")") - else: - out.append("[") - out.append(from_str[i]) - out.append("-") - out.append(to_str[i]) - out.append("]") - - if has_min and has_max: - if min_value < 0 and max_value < 0: - out.append("\"-\" (") - _generate_min_max_int(-max_value, -min_value, out, decimals_left, top_level=True) - out.append(")") - return - - if min_value < 0: - out.append("\"-\" (") - _generate_min_max_int(0, -min_value, out, decimals_left, top_level=True) - out.append(") | ") - min_value = 0 - - min_s = str(min_value) - max_s = str(max_value) - min_digits = len(min_s) - max_digits = len(max_s) - - for digits in range(min_digits, max_digits): - uniform_range(min_s, "9" * digits) - min_s = "1" + "0" * digits - out.append(" | ") - uniform_range(min_s, max_s) - return - - less_decimals = max(decimals_left - 1, 1) - - if has_min: - if min_value < 0: - out.append("\"-\" (") - _generate_min_max_int(None, -min_value, out, decimals_left, top_level=False) - out.append(") | [0] | [1-9] ") - more_digits(0, decimals_left - 1) - elif min_value == 0: - if top_level: - out.append("[0] | [1-9] ") - more_digits(0, less_decimals) - else: - more_digits(1, decimals_left) - elif min_value <= 9: - c = 
str(min_value) - range_start = '1' if top_level else '0' - if c > range_start: - digit_range(range_start, chr(ord(c) - 1)) - out.append(" ") - more_digits(1, less_decimals) - out.append(" | ") - digit_range(c, "9") - out.append(" ") - more_digits(0, less_decimals) - else: - min_s = str(min_value) - length = len(min_s) - c = min_s[0] - - if c > "1": - digit_range("1" if top_level else "0", chr(ord(c) - 1)) - out.append(" ") - more_digits(length, less_decimals) - out.append(" | ") - digit_range(c, c) - out.append(" (") - _generate_min_max_int(int(min_s[1:]), None, out, less_decimals, top_level=False) - out.append(")") - if c < "9": - out.append(" | ") - digit_range(chr(ord(c) + 1), "9") - out.append(" ") - more_digits(length - 1, less_decimals) - return - - if has_max: - if max_value >= 0: - if top_level: - out.append("\"-\" [1-9] ") - more_digits(0, less_decimals) - out.append(" | ") - _generate_min_max_int(0, max_value, out, decimals_left, top_level=True) - else: - out.append("\"-\" (") - _generate_min_max_int(-max_value, None, out, decimals_left, top_level=False) - out.append(")") - return - - raise RuntimeError("At least one of min_value or max_value must be set") - -class BuiltinRule: - def __init__(self, content: str, deps: list | None = None): - self.content = content - self.deps = deps or [] - -# Constraining spaces to prevent model "running away". -SPACE_RULE = '| " " | "\\n" [ \\t]{0,20}' - -PRIMITIVE_RULES = { - 'boolean' : BuiltinRule('("true" | "false") space', []), - 'decimal-part' : BuiltinRule('[0-9]{1,16}', []), - 'integral-part': BuiltinRule('[0] | [1-9] [0-9]{0,15}', []), - 'number' : BuiltinRule('("-"? integral-part) ("." decimal-part)? ([eE] [-+]? integral-part)? space', ['integral-part', 'decimal-part']), - 'integer' : BuiltinRule('("-"? integral-part) space', ['integral-part']), - 'value' : BuiltinRule('object | array | string | number | boolean | null', ['object', 'array', 'string', 'number', 'boolean', 'null']), - 'object' : BuiltinRule('"{" space ( string ":" space value ("," space string ":" space value)* )? "}" space', ['string', 'value']), - 'array' : BuiltinRule('"[" space ( value ("," space value)* )? "]" space', ['value']), - 'uuid' : BuiltinRule(r'"\"" [0-9a-fA-F]{8} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{4} "-" [0-9a-fA-F]{12} "\"" space', []), - 'char' : BuiltinRule(r'[^"\\\x7F\x00-\x1F] | [\\] (["\\bfnrt] | "u" [0-9a-fA-F]{4})', []), - 'string' : BuiltinRule(r'"\"" char* "\"" space', ['char']), - 'null' : BuiltinRule('"null" space', []), -} - -# TODO: support "uri", "email" string formats -STRING_FORMAT_RULES = { - 'date' : BuiltinRule('[0-9]{4} "-" ( "0" [1-9] | "1" [0-2] ) "-" ( \"0\" [1-9] | [1-2] [0-9] | "3" [0-1] )', []), - 'time' : BuiltinRule('([01] [0-9] | "2" [0-3]) ":" [0-5] [0-9] ":" [0-5] [0-9] ( "." [0-9]{3} )? 
( "Z" | ( "+" | "-" ) ( [01] [0-9] | "2" [0-3] ) ":" [0-5] [0-9] )', []), - 'date-time' : BuiltinRule('date "T" time', ['date', 'time']), - 'date-string' : BuiltinRule('"\\"" date "\\"" space', ['date']), - 'time-string' : BuiltinRule('"\\"" time "\\"" space', ['time']), - 'date-time-string': BuiltinRule('"\\"" date-time "\\"" space', ['date-time']), -} - -DOTALL = '[\\U00000000-\\U0010FFFF]' -DOT = '[^\\x0A\\x0D]' - -RESERVED_NAMES = set(["root", "dot", *PRIMITIVE_RULES.keys(), *STRING_FORMAT_RULES.keys()]) - -INVALID_RULE_CHARS_RE = re.compile(r'[^a-zA-Z0-9-]+') -GRAMMAR_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"]') -GRAMMAR_RANGE_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"\]\-\\]') -GRAMMAR_LITERAL_ESCAPES = {'\r': '\\r', '\n': '\\n', '"': '\\"', '-': '\\-', ']': '\\]'} - -NON_LITERAL_SET = set('|.()[]{}*+?') -ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = set('^$.[]()|{}*+?') - - -class SchemaConverter: - def __init__(self, *, prop_order, allow_fetch, dotall, raw_pattern): - self._prop_order = prop_order - self._allow_fetch = allow_fetch - self._dotall = dotall - self._raw_pattern = raw_pattern - self._rules = { - 'space': SPACE_RULE, - } - self._refs = {} - self._refs_being_resolved = set() - - def _format_literal(self, literal): - escaped = GRAMMAR_LITERAL_ESCAPE_RE.sub( - lambda m: GRAMMAR_LITERAL_ESCAPES.get(m.group(0)) or m.group(0), literal - ) - return f'"{escaped}"' - - def not_literal(self, literal: str, dotall: bool = True, maybe_escaped_underscores = False) -> str: - ''' - not_literal('a') -> '[^a]' - not_literal('abc') -> '([^a] | "a" ([^b] | "b" ([^c])?)?)?' - ''' - assert len(literal) > 0, 'Empty literal not supported' - def recurse(i: int): - c = literal[i] - if maybe_escaped_underscores and c == '_': - yield f'[^{c}\\\\]' - yield ' | ' - yield f'"\\\\"? "{c}"' - else: - yield f'[^{c}]' - if i < len(literal) - 1: - yield ' | ' - yield self._format_literal(c) - yield ' (' - yield from recurse(i + 1) - yield ')?' 
- - return ''.join(('(', *recurse(0), ')')) - - def _not_strings(self, strings): - class TrieNode: - def __init__(self): - self.children = {} - self.is_end_of_string = False - - def insert(self, string): - node = self - for c in string: - node = node.children.setdefault(c, TrieNode()) - node.is_end_of_string = True - - trie = TrieNode() - for s in strings: - trie.insert(s) - - char_rule = self._add_primitive('char', PRIMITIVE_RULES['char']) - out = ['["] ( '] - - def visit(node): - rejects = [] - first = True - for c in sorted(node.children.keys()): - child = node.children[c] - rejects.append(c) - if first: - first = False - else: - out.append(' | ') - out.append(f'[{c}]') - if child.children: - out.append(f' (') - visit(child) - out.append(')') - elif child.is_end_of_string: - out.append(f' {char_rule}+') - if node.children: - if not first: - out.append(' | ') - out.append(f'[^"{"".join(rejects)}] {char_rule}*') - visit(trie) - - out.append(f' ){"" if trie.is_end_of_string else "?"} ["] space') - return ''.join(out) - - def _add_rule(self, name, rule): - esc_name = INVALID_RULE_CHARS_RE.sub('-', name) - if esc_name not in self._rules or self._rules[esc_name] == rule: - key = esc_name - else: - i = 0 - while f'{esc_name}{i}' in self._rules and self._rules[f'{esc_name}{i}'] != rule: - i += 1 - key = f'{esc_name}{i}' - self._rules[key] = rule - return key - - def resolve_refs(self, schema: dict, url: str): - ''' - Resolves all $ref fields in the given schema, fetching any remote schemas, - replacing $ref with absolute reference URL and populating self._refs with the - respective referenced (sub)schema dictionaries. - ''' - def visit(n: dict): - if isinstance(n, list): - return [visit(x) for x in n] - elif isinstance(n, dict): - ref = n.get('$ref') - if ref is not None and ref not in self._refs: - if ref.startswith('https://'): - assert self._allow_fetch, 'Fetching remote schemas is not allowed (use --allow-fetch for force)' - import requests - - frag_split = ref.split('#') - base_url = frag_split[0] - - target = self._refs.get(base_url) - if target is None: - target = self.resolve_refs(requests.get(ref).json(), base_url) - self._refs[base_url] = target - - if len(frag_split) == 1 or frag_split[-1] == '': - return target - elif ref.startswith('#/'): - target = schema - ref = f'{url}{ref}' - n['$ref'] = ref - else: - raise ValueError(f'Unsupported ref {ref}') - - for sel in ref.split('#')[-1].split('/')[1:]: - assert target is not None and sel in target, f'Error resolving ref {ref}: {sel} not in {target}' - target = target[sel] - - self._refs[ref] = target - else: - for v in n.values(): - visit(v) - - return n - return visit(schema) - - def _generate_union_rule(self, name, alt_schemas): - return ' | '.join(( - self.visit(alt_schema, f'{name}{"-" if name else "alternative-"}{i}') - for i, alt_schema in enumerate(alt_schemas) - )) - - def _visit_pattern(self, pattern, name): - ''' - Transforms a regular expression pattern into a GBNF rule. - - Input: https://json-schema.org/understanding-json-schema/reference/regular_expressions - Output: https://github.com/ggerganov/llama.cpp/blob/master/grammars/README.md - - Unsupported features: negative/positive lookaheads, greedy/non-greedy modifiers. - - Mostly a 1:1 translation, except for {x} / {x,} / {x,y} quantifiers for which - we define sub-rules to keep the output lean. 
- ''' - - assert pattern.startswith('^') and pattern.endswith('$'), 'Pattern must start with "^" and end with "$"' - pattern = pattern[1:-1] - sub_rule_ids = {} - - i = 0 - length = len(pattern) - - def to_rule(s: tuple[str, bool]) -> str: - (txt, is_literal) = s - return "\"" + txt + "\"" if is_literal else txt - - def transform() -> tuple[str, bool]: - ''' - Parse a unit at index i (advancing it), and return its string representation + whether it's a literal. - ''' - nonlocal i - nonlocal pattern - nonlocal sub_rule_ids - - start = i - # For each component of this sequence, store its string representation and whether it's a literal. - # We only need a flat structure here to apply repetition operators to the last item, and - # to merge literals at the and (we're parsing grouped ( sequences ) recursively and don't treat '|' specially - # (GBNF's syntax is luckily very close to regular expressions!) - seq: list[tuple[str, bool]] = [] - - def get_dot(): - if self._dotall: - rule = DOTALL - else: - # Accept any character... except \n and \r line break chars (\x0A and \xOD) - rule = DOT - return self._add_rule(f'dot', rule) - - def join_seq(): - nonlocal seq - ret = [] - for is_literal, g in itertools.groupby(seq, lambda x: x[1]): - if is_literal: - ret.append((''.join(x[0] for x in g), True)) - else: - ret.extend(g) - if len(ret) == 1: - return ret[0] - return (' '.join(to_rule(x) for x in seq), False) - - while i < length: - c = pattern[i] - if c == '.': - seq.append((get_dot(), False)) - i += 1 - elif c == '(': - i += 1 - if i < length: - assert pattern[i] != '?', f'Unsupported pattern syntax "{pattern[i]}" at index {i} of /{pattern}/' - seq.append((f'({to_rule(transform())})', False)) - elif c == ')': - i += 1 - assert start > 0 and pattern[start-1] == '(', f'Unbalanced parentheses; start = {start}, i = {i}, pattern = {pattern}' - return join_seq() - elif c == '[': - square_brackets = c - i += 1 - while i < length and pattern[i] != ']': - if pattern[i] == '\\': - square_brackets += pattern[i:i+2] - i += 2 - else: - square_brackets += pattern[i] - i += 1 - assert i < length, f'Unbalanced square brackets; start = {start}, i = {i}, pattern = {pattern}' - square_brackets += ']' - i += 1 - seq.append((square_brackets, False)) - elif c == '|': - seq.append(('|', False)) - i += 1 - elif c in ('*', '+', '?'): - seq[-1] = (to_rule(seq[-1]) + c, False) - i += 1 - elif c == '{': - curly_brackets = c - i += 1 - while i < length and pattern[i] != '}': - curly_brackets += pattern[i] - i += 1 - assert i < length, f'Unbalanced curly brackets; start = {start}, i = {i}, pattern = {pattern}' - curly_brackets += '}' - i += 1 - nums = [s.strip() for s in curly_brackets[1:-1].split(',')] - min_times = 0 - max_times = None - try: - if len(nums) == 1: - min_times = int(nums[0]) - max_times = min_times - else: - assert len(nums) == 2 - min_times = int(nums[0]) if nums[0] else 0 - max_times = int(nums[1]) if nums[1] else None - except ValueError: - raise ValueError(f'Invalid quantifier {curly_brackets} in /{pattern}/') - - (sub, sub_is_literal) = seq[-1] - - if not sub_is_literal: - id = sub_rule_ids.get(sub) - if id is None: - id = self._add_rule(f'{name}-{len(sub_rule_ids) + 1}', sub) - sub_rule_ids[sub] = id - sub = id - - seq[-1] = (_build_repetition(f'"{sub}"' if sub_is_literal else sub, min_times, max_times), False) - else: - literal = '' - while i < length: - if pattern[i] == '\\' and i < length - 1: - next = pattern[i + 1] - if next in ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS: - i += 1 - literal += pattern[i] 
- i += 1 - else: - literal += pattern[i:i+2] - i += 2 - elif pattern[i] == '"' and not self._raw_pattern: - literal += '\\"' - i += 1 - elif pattern[i] not in NON_LITERAL_SET and \ - (i == length - 1 or literal == '' or pattern[i+1] == '.' or pattern[i+1] not in NON_LITERAL_SET): - literal += pattern[i] - i += 1 - else: - break - if literal: - seq.append((literal, True)) - - return join_seq() - - return self._add_rule( - name, - to_rule(transform()) if self._raw_pattern \ - else "\"\\\"\" " + to_rule(transform()) + " \"\\\"\" space") - - - def _resolve_ref(self, ref): - ref_name = ref.split('/')[-1] - if ref_name not in self._rules and ref not in self._refs_being_resolved: - self._refs_being_resolved.add(ref) - resolved = self._refs[ref] - ref_name = self.visit(resolved, ref_name) - self._refs_being_resolved.remove(ref) - return ref_name - - def _generate_constant_rule(self, value): - return self._format_literal(json.dumps(value)) - - def visit(self, schema, name): - schema_type = schema.get('type') - schema_format = schema.get('format') - rule_name = name + '-' if name in RESERVED_NAMES else name or 'root' - - if (ref := schema.get('$ref')) is not None: - return self._add_rule(rule_name, self._resolve_ref(ref)) - - elif 'oneOf' in schema or 'anyOf' in schema: - return self._add_rule(rule_name, self._generate_union_rule(name, schema.get('oneOf') or schema['anyOf'])) - - elif isinstance(schema_type, list): - return self._add_rule(rule_name, self._generate_union_rule(name, [{**schema, 'type': t} for t in schema_type])) - - elif 'const' in schema: - return self._add_rule(rule_name, self._generate_constant_rule(schema['const']) + ' space') - - elif 'enum' in schema: - rule = '(' + ' | '.join((self._generate_constant_rule(v) for v in schema['enum'])) + ') space' - return self._add_rule(rule_name, rule) - - elif schema_type in (None, 'object') and \ - ('properties' in schema or \ - ('additionalProperties' in schema and schema['additionalProperties'] is not True)): - required = set(schema.get('required', [])) - properties = list(schema.get('properties', {}).items()) - return self._add_rule(rule_name, self._build_object_rule(properties, required, name, schema.get('additionalProperties'))) - - elif schema_type in (None, 'object') and 'allOf' in schema: - required = set() - properties = [] - hybrid_name = name - def add_component(comp_schema, is_required): - if (ref := comp_schema.get('$ref')) is not None: - comp_schema = self._refs[ref] - - if 'properties' in comp_schema: - for prop_name, prop_schema in comp_schema['properties'].items(): - properties.append((prop_name, prop_schema)) - if is_required: - required.add(prop_name) - - for t in schema['allOf']: - if 'anyOf' in t: - for tt in t['anyOf']: - add_component(tt, is_required=False) - else: - add_component(t, is_required=True) - - return self._add_rule(rule_name, self._build_object_rule(properties, required, hybrid_name, additional_properties=None)) - - elif schema_type in (None, 'array') and ('items' in schema or 'prefixItems' in schema): - items = schema.get('items') or schema['prefixItems'] - if isinstance(items, list): - return self._add_rule( - rule_name, - '"[" space ' + - ' "," space '.join( - self.visit(item, f'{name}{"-" if name else ""}tuple-{i}') - for i, item in enumerate(items)) + - ' "]" space') - else: - item_rule_name = self.visit(items, f'{name}{"-" if name else ""}item') - min_items = schema.get("minItems", 0) - max_items = schema.get("maxItems") - return self._add_rule(rule_name, '"[" space ' + 
_build_repetition(item_rule_name, min_items, max_items, separator_rule='"," space') + ' "]" space') - - elif schema_type in (None, 'string') and 'pattern' in schema: - return self._visit_pattern(schema['pattern'], rule_name) - - elif schema_type in (None, 'string') and re.match(r'^uuid[1-5]?$', schema_format or ''): - return self._add_primitive( - 'root' if rule_name == 'root' else schema_format, - PRIMITIVE_RULES['uuid'] - ) - - elif schema_type in (None, 'string') and f'{schema_format}-string' in STRING_FORMAT_RULES: - prim_name = f'{schema_format}-string' - return self._add_rule(rule_name, self._add_primitive(prim_name, STRING_FORMAT_RULES[prim_name])) - - elif schema_type == 'string' and ('minLength' in schema or 'maxLength' in schema): - char_rule = self._add_primitive('char', PRIMITIVE_RULES['char']) - min_len = schema.get('minLength', 0) - max_len = schema.get('maxLength') - - return self._add_rule(rule_name, r'"\"" ' + _build_repetition(char_rule, min_len, max_len) + r' "\"" space') - - elif schema_type in (None, 'integer') and \ - ('minimum' in schema or 'exclusiveMinimum' in schema or 'maximum' in schema or 'exclusiveMaximum' in schema): - min_value = None - max_value = None - if 'minimum' in schema: - min_value = schema['minimum'] - elif 'exclusiveMinimum' in schema: - min_value = schema['exclusiveMinimum'] + 1 - if 'maximum' in schema: - max_value = schema['maximum'] - elif 'exclusiveMaximum' in schema: - max_value = schema['exclusiveMaximum'] - 1 - - out = ["("] - _generate_min_max_int(min_value, max_value, out) - out.append(") space") - return self._add_rule(rule_name, ''.join(out)) - - elif (schema_type == 'object') or (len(schema) == 0): - return self._add_rule(rule_name, self._add_primitive('object', PRIMITIVE_RULES['object'])) - - else: - assert schema_type in PRIMITIVE_RULES, f'Unrecognized schema: {schema}' - # TODO: support minimum, maximum, exclusiveMinimum, exclusiveMaximum at least for zero - return self._add_primitive('root' if rule_name == 'root' else schema_type, PRIMITIVE_RULES[schema_type]) - - def _add_primitive(self, name: str, rule: BuiltinRule): - n = self._add_rule(name, rule.content) - - for dep in rule.deps: - dep_rule = PRIMITIVE_RULES.get(dep) or STRING_FORMAT_RULES.get(dep) - assert dep_rule, f'Rule {dep} not known' - if dep not in self._rules: - self._add_primitive(dep, dep_rule) - return n - - def _build_object_rule(self, properties: List[Tuple[str, Any]], required: Set[str], name: str, additional_properties: Optional[Union[bool, Any]]): - prop_order = self._prop_order - # sort by position in prop_order (if specified) then by original order - sorted_props = [kv[0] for _, kv in sorted(enumerate(properties), key=lambda ikv: (prop_order.get(ikv[1][0], len(prop_order)), ikv[0]))] - - prop_kv_rule_names = {} - for prop_name, prop_schema in properties: - prop_rule_name = self.visit(prop_schema, f'{name}{"-" if name else ""}{prop_name}') - prop_kv_rule_names[prop_name] = self._add_rule( - f'{name}{"-" if name else ""}{prop_name}-kv', - fr'{self._format_literal(json.dumps(prop_name))} space ":" space {prop_rule_name}' - ) - required_props = [k for k in sorted_props if k in required] - optional_props = [k for k in sorted_props if k not in required] - - if additional_properties is not None and additional_properties != False: - sub_name = f'{name}{"-" if name else ""}additional' - value_rule = self.visit(additional_properties, f'{sub_name}-value') if isinstance(additional_properties, dict) else \ - self._add_primitive('value', PRIMITIVE_RULES['value']) - 
key_rule = self._add_primitive('string', PRIMITIVE_RULES['string']) if not sorted_props \ - else self._add_rule(f'{sub_name}-k', self._not_strings(sorted_props)) - - prop_kv_rule_names["*"] = self._add_rule( - f'{sub_name}-kv', - f'{key_rule} ":" space {value_rule}' - ) - optional_props.append("*") - - rule = '"{" space ' - rule += ' "," space '.join(prop_kv_rule_names[k] for k in required_props) - - if optional_props: - rule += ' (' - if required_props: - rule += ' "," space ( ' - - def get_recursive_refs(ks, first_is_optional): - [k, *rest] = ks - kv_rule_name = prop_kv_rule_names[k] - comma_ref = f'( "," space {kv_rule_name} )' - if first_is_optional: - res = comma_ref + ('*' if k == '*' else '?') - else: - res = kv_rule_name + (' ' + comma_ref + "*" if k == '*' else '') - if len(rest) > 0: - res += ' ' + self._add_rule( - f'{name}{"-" if name else ""}{k}-rest', - get_recursive_refs(rest, first_is_optional=True) - ) - return res - - rule += ' | '.join( - get_recursive_refs(optional_props[i:], first_is_optional=False) - for i in range(len(optional_props)) - ) - if required_props: - rule += ' )' - rule += ' )?' - - rule += ' "}" space' - - return rule - - def format_grammar(self): - return '\n'.join( - f'{name} ::= {rule}' - for name, rule in sorted(self._rules.items(), key=lambda kv: kv[0]) - ) - - -def main(args_in = None): - parser = argparse.ArgumentParser( - description=''' - Generates a grammar (suitable for use in ./llama-cli) that produces JSON conforming to a - given JSON schema. Only a subset of JSON schema features are supported; more may be - added in the future. - ''', - ) - parser.add_argument( - '--prop-order', - default=[], - type=lambda s: s.split(','), - help=''' - comma-separated property names defining the order of precedence for object properties; - properties not specified here are given lower precedence than those that are, and - are kept in their original order from the schema. Required properties are always - given precedence over optional properties. 
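Editor's note: `main()` below simply wires argparse into the converter. For a library-style path, a minimal programmatic sketch of the same flow (schema dict in, GBNF text out) using only calls visible in this file would look like this; the import path and the sample schema are assumptions for illustration.

```python
# Assumes the module above is importable, e.g. as json_schema_to_grammar.
from json_schema_to_grammar import SchemaConverter

schema = {
    "type": "object",
    "properties": {"name": {"type": "string"}, "age": {"type": "integer"}},
    "required": ["name"],
}

converter = SchemaConverter(prop_order={}, allow_fetch=False, dotall=False, raw_pattern=False)
schema = converter.resolve_refs(schema, "file://example.json")  # no-op here: no $ref present
converter.visit(schema, "")          # '' names the root rule
print(converter.format_grammar())    # GBNF text, one "name ::= rule" line per rule
```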
- ''' - ) - parser.add_argument( - '--allow-fetch', - action='store_true', - default=False, - help='Whether to allow fetching referenced schemas over HTTPS') - parser.add_argument( - '--dotall', - action='store_true', - default=False, - help='Whether to treat dot (".") as matching all chars including line breaks in regular expression patterns') - parser.add_argument( - '--raw-pattern', - action='store_true', - default=False, - help='Treats string patterns as raw patterns w/o quotes (or quote escapes)') - - parser.add_argument('schema', help='file containing JSON schema ("-" for stdin)') - args = parser.parse_args(args_in) - - if args.schema.startswith('https://'): - url = args.schema - import requests - schema = requests.get(url).json() - elif args.schema == '-': - url = 'stdin' - schema = json.load(sys.stdin) - else: - url = f'file://{args.schema}' - with open(args.schema) as f: - schema = json.load(f) - converter = SchemaConverter( - prop_order={name: idx for idx, name in enumerate(args.prop_order)}, - allow_fetch=args.allow_fetch, - dotall=args.dotall, - raw_pattern=args.raw_pattern) - schema = converter.resolve_refs(schema, url) - converter.visit(schema, '') - print(converter.format_grammar()) - - -if __name__ == '__main__': - main() diff --git a/examples/llama.vim b/examples/llama.vim deleted file mode 100644 index 1b5ad6ba0..000000000 --- a/examples/llama.vim +++ /dev/null @@ -1,135 +0,0 @@ -" Requires an already running llama.cpp server -" To install either copy or symlink to ~/.vim/autoload/llama.vim -" Then start with either :call llama#doLlamaGen(), -" or add a keybind to your vimrc such as -" nnoremap Z :call llama#doLlamaGen() -" Similarly, you could add an insert mode keybind with -" inoremap call llama#doLlamaGen() -" -" g:llama_api_url, g:llama_api_key and g:llama_overrides can be configured in your .vimrc -" let g:llama_api_url = "192.168.1.10:8080" -" llama_overrides can also be set through buffer/window scopes. For instance -" autocmd filetype python let b:llama_overrides = {"temp": 0.2} -" Could be added to your .vimrc to automatically set a lower temperature when -" editing a python script -" Additionally, an override dict can be stored at the top of a file -" !*{"stop": ["User:"]} -" Could be added to the start of your chatlog.txt to set the stopping token -" These parameter dicts are merged together from lowest to highest priority: -" server default -> g:llama_overrides -> w:llama_overrides -> -" b:llama_overrides -> in file (!*) overrides -" -" Sublists (like logit_bias and stop) are overridden, not merged -" Example override: -" !*{"logit_bias": [[13, -5], [2, false]], "temperature": 1, "top_k": 5, "top_p": 0.5, "n_predict": 256, "repeat_last_n": 256, "repeat_penalty": 1.17647} -if !exists("g:llama_api_url") - let g:llama_api_url= "127.0.0.1:8080" -endif -if !exists("g:llama_overrides") - let g:llama_overrides = {} -endif -const s:querydata = {"n_predict": 256, "stop": [ "\n" ], "stream": v:true } -const s:curlcommand = ['curl','--data-raw', "{\"prompt\":\"### System:\"}", '--silent', '--no-buffer', '--request', 'POST', '--url', g:llama_api_url .. 
'/completion', '--header', "Content-Type: application/json"] -let s:linedict = {} - -func s:callbackHandler(bufn, channel, msg) - if len(a:msg) < 3 - return - elseif a:msg[0] == "d" - let l:msg = a:msg[6:-1] - else - let l:msg = a:msg - endif - let l:decoded_msg = json_decode(l:msg) - let l:newtext = split(l:decoded_msg['content'], "\n", 1) - if len(l:newtext) > 0 - call setbufline(a:bufn, s:linedict[a:bufn], getbufline(a:bufn, s:linedict[a:bufn])[0] .. newtext[0]) - else - echo "nothing genned" - endif - if len(newtext) > 1 - let l:failed = appendbufline(a:bufn, s:linedict[a:bufn], newtext[1:-1]) - let s:linedict[a:bufn] = s:linedict[a:bufn] + len(newtext)-1 - endif - if has_key(l:decoded_msg, "stop") && l:decoded_msg.stop - echo "Finished generation" - endif -endfunction - -func llama#doLlamaGen() - if exists("b:job") - if job_status(b:job) == "run" - call job_stop(b:job) - return - endif - endif - - let l:cbuffer = bufnr("%") - let s:linedict[l:cbuffer] = line('$') - let l:buflines = getbufline(l:cbuffer, 1, 1000) - let l:querydata = copy(s:querydata) - call extend(l:querydata, g:llama_overrides) - if exists("w:llama_overrides") - call extend(l:querydata, w:llama_overrides) - endif - if exists("b:llama_overrides") - call extend(l:querydata, b:llama_overrides) - endif - if l:buflines[0][0:1] == '!*' - let l:userdata = json_decode(l:buflines[0][2:-1]) - call extend(l:querydata, l:userdata) - let l:buflines = l:buflines[1:-1] - endif - let l:querydata.prompt = join(l:buflines, "\n") - let l:curlcommand = copy(s:curlcommand) - if exists("g:llama_api_key") - call extend(l:curlcommand, ['--header', 'Authorization: Bearer ' .. g:llama_api_key]) - endif - let l:curlcommand[2] = json_encode(l:querydata) - let b:job = job_start(l:curlcommand, {"callback": function("s:callbackHandler", [l:cbuffer])}) -endfunction - -" Echos the tokkenization of the provided string , or cursor to end of word -" Onus is placed on the user to include the preceding space -func llama#tokenizeWord(...) - if (a:0 > 0) - let l:input = a:1 - else - exe "normal \"*ye" - let l:input = @* - endif - let l:querydata = {"content": l:input} - let l:curlcommand = copy(s:curlcommand) - let l:curlcommand[2] = json_encode(l:querydata) - let l:curlcommand[8] = g:llama_api_url .. "/tokenize" - let s:token_job = job_start(l:curlcommand, {"callback": function("s:tokenizeWordCallback", [l:input])}) -endfunction - -func s:tokenizeWordCallback(plaintext, channel, msg) - echo '"' .. a:plaintext ..'" - ' .. string(json_decode(a:msg).tokens) -endfunction - - -" Echos the token count of the entire buffer (or provided string) -" Example usage :echo llama#tokenCount() -func llama#tokenCount(...) - if (a:0 > 0) - let l:buflines = a:1 - else - let l:buflines = getline(1,1000) - if l:buflines[0][0:1] == '!*' - let l:buflines = l:buflines[1:-1] - endif - let l:buflines = join(l:buflines, "\n") - endif - let l:querydata = {"content": l:buflines} - let l:curlcommand = copy(s:curlcommand) - let l:curlcommand[2] = json_encode(l:querydata) - let l:curlcommand[8] = g:llama_api_url .. "/tokenize" - let s:token_job = job_start(l:curlcommand, {"callback": "s:tokenCountCallback"}) -endfunction - -func s:tokenCountCallback(channel, msg) - let resp = json_decode(a:msg) - echo len(resp.tokens) -endfunction diff --git a/examples/llm.vim b/examples/llm.vim deleted file mode 100644 index d580a3d00..000000000 --- a/examples/llm.vim +++ /dev/null @@ -1,28 +0,0 @@ -" Basic plugin example - -function! 
Llm() - - let url = "http://127.0.0.1:8080/completion" - - " Get the content of the current buffer - let buffer_content = join(getline(1, '$'), "\n") - - " Create the JSON payload - let json_payload = {"temp":0.72,"top_k":100,"top_p":0.73,"repeat_penalty":1.100000023841858,"n_predict":256,"stop": ["\n\n\n"],"stream": v:false} - let json_payload.prompt = buffer_content - - " Define the curl command - let curl_command = 'curl -k -s -X POST -H "Content-Type: application/json" -d @- ' . url - let response = system(curl_command, json_encode(json_payload)) - - " Extract the content field from the response - let content = json_decode(response).content - - let split_newlines = split(content, '\n', 1) - - " Insert the content at the cursor position - call setline(line('.'), [ getline('.') . split_newlines[0] ] + split_newlines[1:]) -endfunction - -command! Llm call Llm() -noremap :Llm diff --git a/examples/pydantic_models_to_grammar.py b/examples/pydantic_models_to_grammar.py deleted file mode 100644 index 93e5dcb6c..000000000 --- a/examples/pydantic_models_to_grammar.py +++ /dev/null @@ -1,1322 +0,0 @@ -from __future__ import annotations - -import inspect -import json -import re -from copy import copy -from enum import Enum -from inspect import getdoc, isclass -from typing import TYPE_CHECKING, Any, Callable, List, Optional, Union, get_args, get_origin, get_type_hints - -from docstring_parser import parse -from pydantic import BaseModel, create_model - -if TYPE_CHECKING: - from types import GenericAlias -else: - # python 3.8 compat - from typing import _GenericAlias as GenericAlias - -# TODO: fix this -# pyright: reportAttributeAccessIssue=information - - -class PydanticDataType(Enum): - """ - Defines the data types supported by the grammar_generator. - - Attributes: - STRING (str): Represents a string data type. - BOOLEAN (str): Represents a boolean data type. - INTEGER (str): Represents an integer data type. - FLOAT (str): Represents a float data type. - OBJECT (str): Represents an object data type. - ARRAY (str): Represents an array data type. - ENUM (str): Represents an enum data type. - CUSTOM_CLASS (str): Represents a custom class data type. 
- """ - - STRING = "string" - TRIPLE_QUOTED_STRING = "triple_quoted_string" - MARKDOWN_CODE_BLOCK = "markdown_code_block" - BOOLEAN = "boolean" - INTEGER = "integer" - FLOAT = "float" - OBJECT = "object" - ARRAY = "array" - ENUM = "enum" - ANY = "any" - NULL = "null" - CUSTOM_CLASS = "custom-class" - CUSTOM_DICT = "custom-dict" - SET = "set" - - -def map_pydantic_type_to_gbnf(pydantic_type: type[Any]) -> str: - origin_type = get_origin(pydantic_type) - origin_type = pydantic_type if origin_type is None else origin_type - - if isclass(origin_type) and issubclass(origin_type, str): - return PydanticDataType.STRING.value - elif isclass(origin_type) and issubclass(origin_type, bool): - return PydanticDataType.BOOLEAN.value - elif isclass(origin_type) and issubclass(origin_type, int): - return PydanticDataType.INTEGER.value - elif isclass(origin_type) and issubclass(origin_type, float): - return PydanticDataType.FLOAT.value - elif isclass(origin_type) and issubclass(origin_type, Enum): - return PydanticDataType.ENUM.value - - elif isclass(origin_type) and issubclass(origin_type, BaseModel): - return format_model_and_field_name(origin_type.__name__) - elif origin_type is list: - element_type = get_args(pydantic_type)[0] - return f"{map_pydantic_type_to_gbnf(element_type)}-list" - elif origin_type is set: - element_type = get_args(pydantic_type)[0] - return f"{map_pydantic_type_to_gbnf(element_type)}-set" - elif origin_type is Union: - union_types = get_args(pydantic_type) - union_rules = [map_pydantic_type_to_gbnf(ut) for ut in union_types] - return f"union-{'-or-'.join(union_rules)}" - elif origin_type is Optional: - element_type = get_args(pydantic_type)[0] - return f"optional-{map_pydantic_type_to_gbnf(element_type)}" - elif isclass(origin_type): - return f"{PydanticDataType.CUSTOM_CLASS.value}-{format_model_and_field_name(origin_type.__name__)}" - elif origin_type is dict: - key_type, value_type = get_args(pydantic_type) - return f"custom-dict-key-type-{format_model_and_field_name(map_pydantic_type_to_gbnf(key_type))}-value-type-{format_model_and_field_name(map_pydantic_type_to_gbnf(value_type))}" - else: - return "unknown" - - -def format_model_and_field_name(model_name: str) -> str: - parts = re.findall("[A-Z][^A-Z]*", model_name) - if not parts: # Check if the list is empty - return model_name.lower().replace("_", "-") - return "-".join(part.lower().replace("_", "-") for part in parts) - - -def generate_list_rule(element_type): - """ - Generate a GBNF rule for a list of a given element type. - - :param element_type: The type of the elements in the list (e.g., 'string'). - :return: A string representing the GBNF rule for a list of the given type. 
- """ - rule_name = f"{map_pydantic_type_to_gbnf(element_type)}-list" - element_rule = map_pydantic_type_to_gbnf(element_type) - list_rule = rf'{rule_name} ::= "[" {element_rule} ("," {element_rule})* "]"' - return list_rule - - -def get_members_structure(cls, rule_name): - if issubclass(cls, Enum): - # Handle Enum types - members = [f'"\\"{member.value}\\""' for name, member in cls.__members__.items()] - return f"{cls.__name__.lower()} ::= " + " | ".join(members) - if cls.__annotations__ and cls.__annotations__ != {}: - result = f'{rule_name} ::= "{{"' - # Modify this comprehension - members = [ - f' "\\"{name}\\"" ":" {map_pydantic_type_to_gbnf(param_type)}' - for name, param_type in get_type_hints(cls).items() - if name != "self" - ] - - result += '"," '.join(members) - result += ' "}"' - return result - if rule_name == "custom-class-any": - result = f"{rule_name} ::= " - result += "value" - return result - - init_signature = inspect.signature(cls.__init__) - parameters = init_signature.parameters - result = f'{rule_name} ::= "{{"' - # Modify this comprehension too - members = [ - f' "\\"{name}\\"" ":" {map_pydantic_type_to_gbnf(param.annotation)}' - for name, param in parameters.items() - if name != "self" and param.annotation != inspect.Parameter.empty - ] - - result += '", "'.join(members) - result += ' "}"' - return result - - -def regex_to_gbnf(regex_pattern: str) -> str: - """ - Translate a basic regex pattern to a GBNF rule. - Note: This function handles only a subset of simple regex patterns. - """ - gbnf_rule = regex_pattern - - # Translate common regex components to GBNF - gbnf_rule = gbnf_rule.replace("\\d", "[0-9]") - gbnf_rule = gbnf_rule.replace("\\s", "[ \t\n]") - - # Handle quantifiers and other regex syntax that is similar in GBNF - # (e.g., '*', '+', '?', character classes) - - return gbnf_rule - - -def generate_gbnf_integer_rules(max_digit=None, min_digit=None): - """ - - Generate GBNF Integer Rules - - Generates GBNF (Generalized Backus-Naur Form) rules for integers based on the given maximum and minimum digits. - - Parameters: - max_digit (int): The maximum number of digits for the integer. Default is None. - min_digit (int): The minimum number of digits for the integer. Default is None. - - Returns: - integer_rule (str): The identifier for the integer rule generated. - additional_rules (list): A list of additional rules generated based on the given maximum and minimum digits. - - """ - additional_rules = [] - - # Define the rule identifier based on max_digit and min_digit - integer_rule = "integer-part" - if max_digit is not None: - integer_rule += f"-max{max_digit}" - if min_digit is not None: - integer_rule += f"-min{min_digit}" - - # Handling Integer Rules - if max_digit is not None or min_digit is not None: - # Start with an empty rule part - integer_rule_part = "" - - # Add mandatory digits as per min_digit - if min_digit is not None: - integer_rule_part += "[0-9] " * min_digit - - # Add optional digits up to max_digit - if max_digit is not None: - optional_digits = max_digit - (min_digit if min_digit is not None else 0) - integer_rule_part += "".join(["[0-9]? 
" for _ in range(optional_digits)]) - - # Trim the rule part and append it to additional rules - integer_rule_part = integer_rule_part.strip() - if integer_rule_part: - additional_rules.append(f"{integer_rule} ::= {integer_rule_part}") - - return integer_rule, additional_rules - - -def generate_gbnf_float_rules(max_digit=None, min_digit=None, max_precision=None, min_precision=None): - """ - Generate GBNF float rules based on the given constraints. - - :param max_digit: Maximum number of digits in the integer part (default: None) - :param min_digit: Minimum number of digits in the integer part (default: None) - :param max_precision: Maximum number of digits in the fractional part (default: None) - :param min_precision: Minimum number of digits in the fractional part (default: None) - :return: A tuple containing the float rule and additional rules as a list - - Example Usage: - max_digit = 3 - min_digit = 1 - max_precision = 2 - min_precision = 1 - generate_gbnf_float_rules(max_digit, min_digit, max_precision, min_precision) - - Output: - ('float-3-1-2-1', ['integer-part-max3-min1 ::= [0-9] [0-9] [0-9]?', 'fractional-part-max2-min1 ::= [0-9] [0-9]?', 'float-3-1-2-1 ::= integer-part-max3-min1 "." fractional-part-max2-min - *1']) - - Note: - GBNF stands for Generalized Backus-Naur Form, which is a notation technique to specify the syntax of programming languages or other formal grammars. - """ - additional_rules = [] - - # Define the integer part rule - integer_part_rule = ( - "integer-part" - + (f"-max{max_digit}" if max_digit is not None else "") - + (f"-min{min_digit}" if min_digit is not None else "") - ) - - # Define the fractional part rule based on precision constraints - fractional_part_rule = "fractional-part" - fractional_rule_part = "" - if max_precision is not None or min_precision is not None: - fractional_part_rule += (f"-max{max_precision}" if max_precision is not None else "") + ( - f"-min{min_precision}" if min_precision is not None else "" - ) - # Minimum number of digits - fractional_rule_part = "[0-9]" * (min_precision if min_precision is not None else 1) - # Optional additional digits - fractional_rule_part += "".join( - [" [0-9]?"] * ((max_precision - ( - min_precision if min_precision is not None else 1)) if max_precision is not None else 0) - ) - additional_rules.append(f"{fractional_part_rule} ::= {fractional_rule_part}") - - # Define the float rule - float_rule = f"float-{max_digit if max_digit is not None else 'X'}-{min_digit if min_digit is not None else 'X'}-{max_precision if max_precision is not None else 'X'}-{min_precision if min_precision is not None else 'X'}" - additional_rules.append(f'{float_rule} ::= {integer_part_rule} "." {fractional_part_rule}') - - # Generating the integer part rule definition, if necessary - if max_digit is not None or min_digit is not None: - integer_rule_part = "[0-9]" - if min_digit is not None and min_digit > 1: - integer_rule_part += " [0-9]" * (min_digit - 1) - if max_digit is not None: - integer_rule_part += "".join([" [0-9]?"] * (max_digit - (min_digit if min_digit is not None else 1))) - additional_rules.append(f"{integer_part_rule} ::= {integer_rule_part.strip()}") - - return float_rule, additional_rules - - -def generate_gbnf_rule_for_type( - model_name, field_name, field_type, is_optional, processed_models, created_rules, field_info=None -) -> tuple[str, list[str]]: - """ - Generate GBNF rule for a given field type. - - :param model_name: Name of the model. - - :param field_name: Name of the field. 
- :param field_type: Type of the field. - :param is_optional: Whether the field is optional. - :param processed_models: List of processed models. - :param created_rules: List of created rules. - :param field_info: Additional information about the field (optional). - - :return: Tuple containing the GBNF type and a list of additional rules. - :rtype: tuple[str, list] - """ - rules = [] - - field_name = format_model_and_field_name(field_name) - gbnf_type = map_pydantic_type_to_gbnf(field_type) - - origin_type = get_origin(field_type) - origin_type = field_type if origin_type is None else origin_type - - if isclass(origin_type) and issubclass(origin_type, BaseModel): - nested_model_name = format_model_and_field_name(field_type.__name__) - nested_model_rules, _ = generate_gbnf_grammar(field_type, processed_models, created_rules) - rules.extend(nested_model_rules) - gbnf_type, rules = nested_model_name, rules - elif isclass(origin_type) and issubclass(origin_type, Enum): - enum_values = [f'"\\"{e.value}\\""' for e in field_type] # Adding escaped quotes - enum_rule = f"{model_name}-{field_name} ::= {' | '.join(enum_values)}" - rules.append(enum_rule) - gbnf_type, rules = model_name + "-" + field_name, rules - elif origin_type is list: # Array - element_type = get_args(field_type)[0] - element_rule_name, additional_rules = generate_gbnf_rule_for_type( - model_name, f"{field_name}-element", element_type, is_optional, processed_models, created_rules - ) - rules.extend(additional_rules) - array_rule = f"""{model_name}-{field_name} ::= "[" ws {element_rule_name} ("," ws {element_rule_name})* "]" """ - rules.append(array_rule) - gbnf_type, rules = model_name + "-" + field_name, rules - - elif origin_type is set: # Array - element_type = get_args(field_type)[0] - element_rule_name, additional_rules = generate_gbnf_rule_for_type( - model_name, f"{field_name}-element", element_type, is_optional, processed_models, created_rules - ) - rules.extend(additional_rules) - array_rule = f"""{model_name}-{field_name} ::= "[" ws {element_rule_name} ("," ws {element_rule_name})* "]" """ - rules.append(array_rule) - gbnf_type, rules = model_name + "-" + field_name, rules - - elif gbnf_type.startswith("custom-class-"): - rules.append(get_members_structure(field_type, gbnf_type)) - elif gbnf_type.startswith("custom-dict-"): - key_type, value_type = get_args(field_type) - - additional_key_type, additional_key_rules = generate_gbnf_rule_for_type( - model_name, f"{field_name}-key-type", key_type, is_optional, processed_models, created_rules - ) - additional_value_type, additional_value_rules = generate_gbnf_rule_for_type( - model_name, f"{field_name}-value-type", value_type, is_optional, processed_models, created_rules - ) - gbnf_type = rf'{gbnf_type} ::= "{{" ( {additional_key_type} ": " {additional_value_type} ("," "\n" ws {additional_key_type} ":" {additional_value_type})* )? 
"}}" ' - - rules.extend(additional_key_rules) - rules.extend(additional_value_rules) - elif gbnf_type.startswith("union-"): - union_types = get_args(field_type) - union_rules = [] - - for union_type in union_types: - if isinstance(union_type, GenericAlias): - union_gbnf_type, union_rules_list = generate_gbnf_rule_for_type( - model_name, field_name, union_type, False, processed_models, created_rules - ) - union_rules.append(union_gbnf_type) - rules.extend(union_rules_list) - - elif not issubclass(union_type, type(None)): - union_gbnf_type, union_rules_list = generate_gbnf_rule_for_type( - model_name, field_name, union_type, False, processed_models, created_rules - ) - union_rules.append(union_gbnf_type) - rules.extend(union_rules_list) - - # Defining the union grammar rule separately - if len(union_rules) == 1: - union_grammar_rule = f"{model_name}-{field_name}-optional ::= {' | '.join(union_rules)} | null" - else: - union_grammar_rule = f"{model_name}-{field_name}-union ::= {' | '.join(union_rules)}" - rules.append(union_grammar_rule) - if len(union_rules) == 1: - gbnf_type = f"{model_name}-{field_name}-optional" - else: - gbnf_type = f"{model_name}-{field_name}-union" - elif isclass(origin_type) and issubclass(origin_type, str): - if field_info and hasattr(field_info, "json_schema_extra") and field_info.json_schema_extra is not None: - triple_quoted_string = field_info.json_schema_extra.get("triple_quoted_string", False) - markdown_string = field_info.json_schema_extra.get("markdown_code_block", False) - - gbnf_type = PydanticDataType.TRIPLE_QUOTED_STRING.value if triple_quoted_string else PydanticDataType.STRING.value - gbnf_type = PydanticDataType.MARKDOWN_CODE_BLOCK.value if markdown_string else gbnf_type - - elif field_info and hasattr(field_info, "pattern"): - # Convert regex pattern to grammar rule - regex_pattern = field_info.regex.pattern - gbnf_type = f"pattern-{field_name} ::= {regex_to_gbnf(regex_pattern)}" - else: - gbnf_type = PydanticDataType.STRING.value - - elif ( - isclass(origin_type) - and issubclass(origin_type, float) - and field_info - and hasattr(field_info, "json_schema_extra") - and field_info.json_schema_extra is not None - ): - # Retrieve precision attributes for floats - max_precision = ( - field_info.json_schema_extra.get("max_precision") if field_info and hasattr(field_info, - "json_schema_extra") else None - ) - min_precision = ( - field_info.json_schema_extra.get("min_precision") if field_info and hasattr(field_info, - "json_schema_extra") else None - ) - max_digits = field_info.json_schema_extra.get("max_digit") if field_info and hasattr(field_info, - "json_schema_extra") else None - min_digits = field_info.json_schema_extra.get("min_digit") if field_info and hasattr(field_info, - "json_schema_extra") else None - - # Generate GBNF rule for float with given attributes - gbnf_type, rules = generate_gbnf_float_rules( - max_digit=max_digits, min_digit=min_digits, max_precision=max_precision, min_precision=min_precision - ) - - elif ( - isclass(origin_type) - and issubclass(origin_type, int) - and field_info - and hasattr(field_info, "json_schema_extra") - and field_info.json_schema_extra is not None - ): - # Retrieve digit attributes for integers - max_digits = field_info.json_schema_extra.get("max_digit") if field_info and hasattr(field_info, - "json_schema_extra") else None - min_digits = field_info.json_schema_extra.get("min_digit") if field_info and hasattr(field_info, - "json_schema_extra") else None - - # Generate GBNF rule for integer with given 
attributes - gbnf_type, rules = generate_gbnf_integer_rules(max_digit=max_digits, min_digit=min_digits) - else: - gbnf_type, rules = gbnf_type, [] - - return gbnf_type, rules - - -def generate_gbnf_grammar(model: type[BaseModel], processed_models: set[type[BaseModel]], created_rules: dict[str, list[str]]) -> tuple[list[str], bool]: - """ - - Generate GBnF Grammar - - Generates a GBnF grammar for a given model. - - :param model: A Pydantic model class to generate the grammar for. Must be a subclass of BaseModel. - :param processed_models: A set of already processed models to prevent infinite recursion. - :param created_rules: A dict containing already created rules to prevent duplicates. - :return: A list of GBnF grammar rules in string format. And two booleans indicating if an extra markdown or triple quoted string is in the grammar. - Example Usage: - ``` - model = MyModel - processed_models = set() - created_rules = dict() - - gbnf_grammar = generate_gbnf_grammar(model, processed_models, created_rules) - ``` - """ - if model in processed_models: - return [], False - - processed_models.add(model) - model_name = format_model_and_field_name(model.__name__) - - if not issubclass(model, BaseModel): - # For non-Pydantic classes, generate model_fields from __annotations__ or __init__ - if hasattr(model, "__annotations__") and model.__annotations__: - model_fields = {name: (typ, ...) for name, typ in get_type_hints(model).items()} - else: - init_signature = inspect.signature(model.__init__) - parameters = init_signature.parameters - model_fields = {name: (param.annotation, param.default) for name, param in parameters.items() if - name != "self"} - else: - # For Pydantic models, use model_fields and check for ellipsis (required fields) - model_fields = get_type_hints(model) - - model_rule_parts = [] - nested_rules = [] - has_markdown_code_block = False - has_triple_quoted_string = False - look_for_markdown_code_block = False - look_for_triple_quoted_string = False - for field_name, field_info in model_fields.items(): - if not issubclass(model, BaseModel): - field_type, default_value = field_info - # Check if the field is optional (not required) - is_optional = (default_value is not inspect.Parameter.empty) and (default_value is not Ellipsis) - else: - field_type = field_info - field_info = model.model_fields[field_name] - is_optional = field_info.is_required is False and get_origin(field_type) is Optional - rule_name, additional_rules = generate_gbnf_rule_for_type( - model_name, format_model_and_field_name(field_name), field_type, is_optional, processed_models, - created_rules, field_info - ) - look_for_markdown_code_block = True if rule_name == "markdown_code_block" else False - look_for_triple_quoted_string = True if rule_name == "triple_quoted_string" else False - if not look_for_markdown_code_block and not look_for_triple_quoted_string: - if rule_name not in created_rules: - created_rules[rule_name] = additional_rules - model_rule_parts.append(f' ws "\\"{field_name}\\"" ":" ws {rule_name}') # Adding escaped quotes - nested_rules.extend(additional_rules) - else: - has_triple_quoted_string = look_for_triple_quoted_string - has_markdown_code_block = look_for_markdown_code_block - - fields_joined = r' "," "\n" '.join(model_rule_parts) - model_rule = rf'{model_name} ::= "{{" "\n" {fields_joined} "\n" ws "}}"' - - has_special_string = False - if has_triple_quoted_string: - model_rule += '"\\n" ws "}"' - model_rule += '"\\n" triple-quoted-string' - has_special_string = True - if 
has_markdown_code_block: - model_rule += '"\\n" ws "}"' - model_rule += '"\\n" markdown-code-block' - has_special_string = True - all_rules = [model_rule] + nested_rules - - return all_rules, has_special_string - - -def generate_gbnf_grammar_from_pydantic_models( - models: list[type[BaseModel]], outer_object_name: str | None = None, outer_object_content: str | None = None, - list_of_outputs: bool = False -) -> str: - """ - Generate GBNF Grammar from Pydantic Models. - - This method takes a list of Pydantic models and uses them to generate a GBNF grammar string. The generated grammar string can be used for parsing and validating data using the generated - * grammar. - - Args: - models (list[type[BaseModel]]): A list of Pydantic models to generate the grammar from. - outer_object_name (str): Outer object name for the GBNF grammar. If None, no outer object will be generated. Eg. "function" for function calling. - outer_object_content (str): Content for the outer rule in the GBNF grammar. Eg. "function_parameters" or "params" for function calling. - list_of_outputs (str, optional): Allows a list of output objects - Returns: - str: The generated GBNF grammar string. - - Examples: - models = [UserModel, PostModel] - grammar = generate_gbnf_grammar_from_pydantic(models) - print(grammar) - # Output: - # root ::= UserModel | PostModel - # ... - """ - processed_models: set[type[BaseModel]] = set() - all_rules = [] - created_rules: dict[str, list[str]] = {} - if outer_object_name is None: - for model in models: - model_rules, _ = generate_gbnf_grammar(model, processed_models, created_rules) - all_rules.extend(model_rules) - - if list_of_outputs: - root_rule = r'root ::= (" "| "\n") "[" ws grammar-models ("," ws grammar-models)* ws "]"' + "\n" - else: - root_rule = r'root ::= (" "| "\n") grammar-models' + "\n" - root_rule += "grammar-models ::= " + " | ".join( - [format_model_and_field_name(model.__name__) for model in models]) - all_rules.insert(0, root_rule) - return "\n".join(all_rules) - elif outer_object_name is not None: - if list_of_outputs: - root_rule = ( - rf'root ::= (" "| "\n") "[" ws {format_model_and_field_name(outer_object_name)} ("," ws {format_model_and_field_name(outer_object_name)})* ws "]"' - + "\n" - ) - else: - root_rule = f"root ::= {format_model_and_field_name(outer_object_name)}\n" - - model_rule = ( - rf'{format_model_and_field_name(outer_object_name)} ::= (" "| "\n") "{{" ws "\"{outer_object_name}\"" ":" ws grammar-models' - ) - - fields_joined = " | ".join( - [rf"{format_model_and_field_name(model.__name__)}-grammar-model" for model in models]) - - grammar_model_rules = f"\ngrammar-models ::= {fields_joined}" - mod_rules = [] - for model in models: - mod_rule = rf"{format_model_and_field_name(model.__name__)}-grammar-model ::= " - mod_rule += ( - rf'"\"{model.__name__}\"" "," ws "\"{outer_object_content}\"" ":" ws {format_model_and_field_name(model.__name__)}' + "\n" - ) - mod_rules.append(mod_rule) - grammar_model_rules += "\n" + "\n".join(mod_rules) - - for model in models: - model_rules, has_special_string = generate_gbnf_grammar(model, processed_models, - created_rules) - - if not has_special_string: - model_rules[0] += r'"\n" ws "}"' - - all_rules.extend(model_rules) - - all_rules.insert(0, root_rule + model_rule + grammar_model_rules) - return "\n".join(all_rules) - - -def get_primitive_grammar(grammar): - """ - Returns the needed GBNF primitive grammar for a given GBNF grammar string. - - Args: - grammar (str): The string containing the GBNF grammar. 
- - Returns: - str: GBNF primitive grammar string. - """ - type_list: list[type[object]] = [] - if "string-list" in grammar: - type_list.append(str) - if "boolean-list" in grammar: - type_list.append(bool) - if "integer-list" in grammar: - type_list.append(int) - if "float-list" in grammar: - type_list.append(float) - additional_grammar = [generate_list_rule(t) for t in type_list] - primitive_grammar = r""" -boolean ::= "true" | "false" -null ::= "null" -string ::= "\"" ( - [^"\\] | - "\\" (["\\/bfnrt] | "u" [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F] [0-9a-fA-F]) - )* "\"" ws -ws ::= ([ \t\n] ws)? -float ::= ("-"? ([0] | [1-9] [0-9]*)) ("." [0-9]+)? ([eE] [-+]? [0-9]+)? ws - -integer ::= [0-9]+""" - - any_block = "" - if "custom-class-any" in grammar: - any_block = """ -value ::= object | array | string | number | boolean | null - -object ::= - "{" ws ( - string ":" ws value - ("," ws string ":" ws value)* - )? "}" ws - -array ::= - "[" ws ( - value - ("," ws value)* - )? "]" ws - -number ::= integer | float""" - - markdown_code_block_grammar = "" - if "markdown-code-block" in grammar: - markdown_code_block_grammar = r''' -markdown-code-block ::= opening-triple-ticks markdown-code-block-content closing-triple-ticks -markdown-code-block-content ::= ( [^`] | "`" [^`] | "`" "`" [^`] )* -opening-triple-ticks ::= "```" "python" "\n" | "```" "c" "\n" | "```" "cpp" "\n" | "```" "txt" "\n" | "```" "text" "\n" | "```" "json" "\n" | "```" "javascript" "\n" | "```" "css" "\n" | "```" "html" "\n" | "```" "markdown" "\n" -closing-triple-ticks ::= "```" "\n"''' - - if "triple-quoted-string" in grammar: - markdown_code_block_grammar = r""" -triple-quoted-string ::= triple-quotes triple-quoted-string-content triple-quotes -triple-quoted-string-content ::= ( [^'] | "'" [^'] | "'" "'" [^'] )* -triple-quotes ::= "'''" """ - return "\n" + "\n".join(additional_grammar) + any_block + primitive_grammar + markdown_code_block_grammar - - -def generate_markdown_documentation( - pydantic_models: list[type[BaseModel]], model_prefix="Model", fields_prefix="Fields", - documentation_with_field_description=True -) -> str: - """ - Generate markdown documentation for a list of Pydantic models. - - Args: - pydantic_models (list[type[BaseModel]]): list of Pydantic model classes. - model_prefix (str): Prefix for the model section. - fields_prefix (str): Prefix for the fields section. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - str: Generated text documentation. 
- """ - documentation = "" - pyd_models: list[tuple[type[BaseModel], bool]] = [(model, True) for model in pydantic_models] - for model, add_prefix in pyd_models: - if add_prefix: - documentation += f"{model_prefix}: {model.__name__}\n" - else: - documentation += f"Model: {model.__name__}\n" - - # Handling multi-line model description with proper indentation - - class_doc = getdoc(model) - base_class_doc = getdoc(BaseModel) - class_description = class_doc if class_doc and class_doc != base_class_doc else "" - if class_description != "": - documentation += " Description: " - documentation += format_multiline_description(class_description, 0) + "\n" - - if add_prefix: - # Indenting the fields section - documentation += f" {fields_prefix}:\n" - else: - documentation += f" Fields:\n" # noqa: F541 - if isclass(model) and issubclass(model, BaseModel): - for name, field_type in get_type_hints(model).items(): - # if name == "markdown_code_block": - # continue - if get_origin(field_type) == list: - element_type = get_args(field_type)[0] - if isclass(element_type) and issubclass(element_type, BaseModel): - pyd_models.append((element_type, False)) - if get_origin(field_type) == Union: - element_types = get_args(field_type) - for element_type in element_types: - if isclass(element_type) and issubclass(element_type, BaseModel): - pyd_models.append((element_type, False)) - documentation += generate_field_markdown( - name, field_type, model, documentation_with_field_description=documentation_with_field_description - ) - documentation += "\n" - - if hasattr(model, "Config") and hasattr(model.Config, - "json_schema_extra") and "example" in model.Config.json_schema_extra: - documentation += f" Expected Example Output for {format_model_and_field_name(model.__name__)}:\n" - json_example = json.dumps(model.Config.json_schema_extra["example"]) - documentation += format_multiline_description(json_example, 2) + "\n" - - return documentation - - -def generate_field_markdown( - field_name: str, field_type: type[Any], model: type[BaseModel], depth=1, - documentation_with_field_description=True -) -> str: - """ - Generate markdown documentation for a Pydantic model field. - - Args: - field_name (str): Name of the field. - field_type (type[Any]): Type of the field. - model (type[BaseModel]): Pydantic model class. - depth (int): Indentation depth in the documentation. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - str: Generated text documentation for the field. 
- """ - indent = " " * depth - - field_info = model.model_fields.get(field_name) - field_description = field_info.description if field_info and field_info.description else "" - - origin_type = get_origin(field_type) - origin_type = field_type if origin_type is None else origin_type - - if origin_type == list: - element_type = get_args(field_type)[0] - field_text = f"{indent}{field_name} ({format_model_and_field_name(field_type.__name__)} of {format_model_and_field_name(element_type.__name__)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - elif origin_type == Union: - element_types = get_args(field_type) - types = [] - for element_type in element_types: - types.append(format_model_and_field_name(element_type.__name__)) - field_text = f"{indent}{field_name} ({' or '.join(types)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - else: - field_text = f"{indent}{field_name} ({format_model_and_field_name(field_type.__name__)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - - if not documentation_with_field_description: - return field_text - - if field_description != "": - field_text += f" Description: {field_description}\n" - - # Check for and include field-specific examples if available - if hasattr(model, "Config") and hasattr(model.Config, - "json_schema_extra") and "example" in model.Config.json_schema_extra: - field_example = model.Config.json_schema_extra["example"].get(field_name) - if field_example is not None: - example_text = f"'{field_example}'" if isinstance(field_example, str) else field_example - field_text += f"{indent} Example: {example_text}\n" - - if isclass(origin_type) and issubclass(origin_type, BaseModel): - field_text += f"{indent} Details:\n" - for name, type_ in get_type_hints(field_type).items(): - field_text += generate_field_markdown(name, type_, field_type, depth + 2) - - return field_text - - -def format_json_example(example: dict[str, Any], depth: int) -> str: - """ - Format a JSON example into a readable string with indentation. - - Args: - example (dict): JSON example to be formatted. - depth (int): Indentation depth. - - Returns: - str: Formatted JSON example string. - """ - indent = " " * depth - formatted_example = "{\n" - for key, value in example.items(): - value_text = f"'{value}'" if isinstance(value, str) else value - formatted_example += f"{indent}{key}: {value_text},\n" - formatted_example = formatted_example.rstrip(",\n") + "\n" + indent + "}" - return formatted_example - - -def generate_text_documentation( - pydantic_models: list[type[BaseModel]], model_prefix="Model", fields_prefix="Fields", - documentation_with_field_description=True -) -> str: - """ - Generate text documentation for a list of Pydantic models. - - Args: - pydantic_models (list[type[BaseModel]]): List of Pydantic model classes. - model_prefix (str): Prefix for the model section. - fields_prefix (str): Prefix for the fields section. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - str: Generated text documentation. 
- """ - documentation = "" - pyd_models: list[tuple[type[BaseModel], bool]] = [(model, True) for model in pydantic_models] - for model, add_prefix in pyd_models: - if add_prefix: - documentation += f"{model_prefix}: {model.__name__}\n" - else: - documentation += f"Model: {model.__name__}\n" - - # Handling multi-line model description with proper indentation - - class_doc = getdoc(model) - base_class_doc = getdoc(BaseModel) - class_description = class_doc if class_doc and class_doc != base_class_doc else "" - if class_description != "": - documentation += " Description: " - documentation += "\n" + format_multiline_description(class_description, 2) + "\n" - - if isclass(model) and issubclass(model, BaseModel): - documentation_fields = "" - for name, field_type in get_type_hints(model).items(): - # if name == "markdown_code_block": - # continue - if get_origin(field_type) == list: - element_type = get_args(field_type)[0] - if isclass(element_type) and issubclass(element_type, BaseModel): - pyd_models.append((element_type, False)) - if get_origin(field_type) == Union: - element_types = get_args(field_type) - for element_type in element_types: - if isclass(element_type) and issubclass(element_type, BaseModel): - pyd_models.append((element_type, False)) - documentation_fields += generate_field_text( - name, field_type, model, documentation_with_field_description=documentation_with_field_description - ) - if documentation_fields != "": - if add_prefix: - documentation += f" {fields_prefix}:\n{documentation_fields}" - else: - documentation += f" Fields:\n{documentation_fields}" - documentation += "\n" - - if hasattr(model, "Config") and hasattr(model.Config, - "json_schema_extra") and "example" in model.Config.json_schema_extra: - documentation += f" Expected Example Output for {format_model_and_field_name(model.__name__)}:\n" - json_example = json.dumps(model.Config.json_schema_extra["example"]) - documentation += format_multiline_description(json_example, 2) + "\n" - - return documentation - - -def generate_field_text( - field_name: str, field_type: type[Any], model: type[BaseModel], depth=1, - documentation_with_field_description=True -) -> str: - """ - Generate text documentation for a Pydantic model field. - - Args: - field_name (str): Name of the field. - field_type (type[Any]): Type of the field. - model (type[BaseModel]): Pydantic model class. - depth (int): Indentation depth in the documentation. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - str: Generated text documentation for the field. 
- """ - indent = " " * depth - - field_info = model.model_fields.get(field_name) - field_description = field_info.description if field_info and field_info.description else "" - - if get_origin(field_type) == list: - element_type = get_args(field_type)[0] - field_text = f"{indent}{field_name} ({format_model_and_field_name(field_type.__name__)} of {format_model_and_field_name(element_type.__name__)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - elif get_origin(field_type) == Union: - element_types = get_args(field_type) - types = [] - for element_type in element_types: - types.append(format_model_and_field_name(element_type.__name__)) - field_text = f"{indent}{field_name} ({' or '.join(types)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - else: - field_text = f"{indent}{field_name} ({format_model_and_field_name(field_type.__name__)})" - if field_description != "": - field_text += ":\n" - else: - field_text += "\n" - - if not documentation_with_field_description: - return field_text - - if field_description != "": - field_text += f"{indent} Description: " + field_description + "\n" - - # Check for and include field-specific examples if available - if hasattr(model, "Config") and hasattr(model.Config, - "json_schema_extra") and "example" in model.Config.json_schema_extra: - field_example = model.Config.json_schema_extra["example"].get(field_name) - if field_example is not None: - example_text = f"'{field_example}'" if isinstance(field_example, str) else field_example - field_text += f"{indent} Example: {example_text}\n" - - if isclass(field_type) and issubclass(field_type, BaseModel): - field_text += f"{indent} Details:\n" - for name, type_ in get_type_hints(field_type).items(): - field_text += generate_field_text(name, type_, field_type, depth + 2) - - return field_text - - -def format_multiline_description(description: str, indent_level: int) -> str: - """ - Format a multiline description with proper indentation. - - Args: - description (str): Multiline description. - indent_level (int): Indentation level. - - Returns: - str: Formatted multiline description. - """ - indent = " " * indent_level - return indent + description.replace("\n", "\n" + indent) - - -def save_gbnf_grammar_and_documentation( - grammar, documentation, grammar_file_path="./grammar.gbnf", documentation_file_path="./grammar_documentation.md" -): - """ - Save GBNF grammar and documentation to specified files. - - Args: - grammar (str): GBNF grammar string. - documentation (str): Documentation string. - grammar_file_path (str): File path to save the GBNF grammar. - documentation_file_path (str): File path to save the documentation. - - Returns: - None - """ - try: - with open(grammar_file_path, "w") as file: - file.write(grammar + get_primitive_grammar(grammar)) - print(f"Grammar successfully saved to {grammar_file_path}") - except IOError as e: - print(f"An error occurred while saving the grammar file: {e}") - - try: - with open(documentation_file_path, "w") as file: - file.write(documentation) - print(f"Documentation successfully saved to {documentation_file_path}") - except IOError as e: - print(f"An error occurred while saving the documentation file: {e}") - - -def remove_empty_lines(string): - """ - Remove empty lines from a string. - - Args: - string (str): Input string. - - Returns: - str: String with empty lines removed. 
- """ - lines = string.splitlines() - non_empty_lines = [line for line in lines if line.strip() != ""] - string_no_empty_lines = "\n".join(non_empty_lines) - return string_no_empty_lines - - -def generate_and_save_gbnf_grammar_and_documentation( - pydantic_model_list, - grammar_file_path="./generated_grammar.gbnf", - documentation_file_path="./generated_grammar_documentation.md", - outer_object_name: str | None = None, - outer_object_content: str | None = None, - model_prefix: str = "Output Model", - fields_prefix: str = "Output Fields", - list_of_outputs: bool = False, - documentation_with_field_description=True, -): - """ - Generate GBNF grammar and documentation, and save them to specified files. - - Args: - pydantic_model_list: List of Pydantic model classes. - grammar_file_path (str): File path to save the generated GBNF grammar. - documentation_file_path (str): File path to save the generated documentation. - outer_object_name (str): Outer object name for the GBNF grammar. If None, no outer object will be generated. Eg. "function" for function calling. - outer_object_content (str): Content for the outer rule in the GBNF grammar. Eg. "function_parameters" or "params" for function calling. - model_prefix (str): Prefix for the model section in the documentation. - fields_prefix (str): Prefix for the fields section in the documentation. - list_of_outputs (bool): Whether the output is a list of items. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - None - """ - documentation = generate_markdown_documentation( - pydantic_model_list, model_prefix, fields_prefix, - documentation_with_field_description=documentation_with_field_description - ) - grammar = generate_gbnf_grammar_from_pydantic_models(pydantic_model_list, outer_object_name, outer_object_content, - list_of_outputs) - grammar = remove_empty_lines(grammar) - save_gbnf_grammar_and_documentation(grammar, documentation, grammar_file_path, documentation_file_path) - - -def generate_gbnf_grammar_and_documentation( - pydantic_model_list, - outer_object_name: str | None = None, - outer_object_content: str | None = None, - model_prefix: str = "Output Model", - fields_prefix: str = "Output Fields", - list_of_outputs: bool = False, - documentation_with_field_description=True, -): - """ - Generate GBNF grammar and documentation for a list of Pydantic models. - - Args: - pydantic_model_list: List of Pydantic model classes. - outer_object_name (str): Outer object name for the GBNF grammar. If None, no outer object will be generated. Eg. "function" for function calling. - outer_object_content (str): Content for the outer rule in the GBNF grammar. Eg. "function_parameters" or "params" for function calling. - model_prefix (str): Prefix for the model section in the documentation. - fields_prefix (str): Prefix for the fields section in the documentation. - list_of_outputs (bool): Whether the output is a list of items. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - tuple: GBNF grammar string, documentation string. 
- """ - documentation = generate_markdown_documentation( - copy(pydantic_model_list), model_prefix, fields_prefix, - documentation_with_field_description=documentation_with_field_description - ) - grammar = generate_gbnf_grammar_from_pydantic_models(pydantic_model_list, outer_object_name, outer_object_content, - list_of_outputs) - grammar = remove_empty_lines(grammar + get_primitive_grammar(grammar)) - return grammar, documentation - - -def generate_gbnf_grammar_and_documentation_from_dictionaries( - dictionaries: list[dict[str, Any]], - outer_object_name: str | None = None, - outer_object_content: str | None = None, - model_prefix: str = "Output Model", - fields_prefix: str = "Output Fields", - list_of_outputs: bool = False, - documentation_with_field_description=True, -): - """ - Generate GBNF grammar and documentation from a list of dictionaries. - - Args: - dictionaries (list[dict]): List of dictionaries representing Pydantic models. - outer_object_name (str): Outer object name for the GBNF grammar. If None, no outer object will be generated. Eg. "function" for function calling. - outer_object_content (str): Content for the outer rule in the GBNF grammar. Eg. "function_parameters" or "params" for function calling. - model_prefix (str): Prefix for the model section in the documentation. - fields_prefix (str): Prefix for the fields section in the documentation. - list_of_outputs (bool): Whether the output is a list of items. - documentation_with_field_description (bool): Include field descriptions in the documentation. - - Returns: - tuple: GBNF grammar string, documentation string. - """ - pydantic_model_list = create_dynamic_models_from_dictionaries(dictionaries) - documentation = generate_markdown_documentation( - copy(pydantic_model_list), model_prefix, fields_prefix, - documentation_with_field_description=documentation_with_field_description - ) - grammar = generate_gbnf_grammar_from_pydantic_models(pydantic_model_list, outer_object_name, outer_object_content, - list_of_outputs) - grammar = remove_empty_lines(grammar + get_primitive_grammar(grammar)) - return grammar, documentation - - -def create_dynamic_model_from_function(func: Callable[..., Any]): - """ - Creates a dynamic Pydantic model from a given function's type hints and adds the function as a 'run' method. - - Args: - func (Callable): A function with type hints from which to create the model. - - Returns: - A dynamic Pydantic model class with the provided function as a 'run' method. - """ - - # Get the signature of the function - sig = inspect.signature(func) - - # Parse the docstring - assert func.__doc__ is not None - docstring = parse(func.__doc__) - - dynamic_fields = {} - param_docs = [] - for param in sig.parameters.values(): - # Exclude 'self' parameter - if param.name == "self": - continue - - # Assert that the parameter has a type annotation - if param.annotation == inspect.Parameter.empty: - raise TypeError(f"Parameter '{param.name}' in function '{func.__name__}' lacks a type annotation") - - # Find the parameter's description in the docstring - param_doc = next((d for d in docstring.params if d.arg_name == param.name), None) - - # Assert that the parameter has a description - if not param_doc or not param_doc.description: - raise ValueError( - f"Parameter '{param.name}' in function '{func.__name__}' lacks a description in the docstring") - - # Add parameter details to the schema - param_docs.append((param.name, param_doc)) - if param.default == inspect.Parameter.empty: - default_value = ... 
- else: - default_value = param.default - dynamic_fields[param.name] = ( - param.annotation if param.annotation != inspect.Parameter.empty else str, default_value) - # Creating the dynamic model - dynamic_model = create_model(f"{func.__name__}", **dynamic_fields) - - for name, param_doc in param_docs: - dynamic_model.model_fields[name].description = param_doc.description - - dynamic_model.__doc__ = docstring.short_description - - def run_method_wrapper(self): - func_args = {name: getattr(self, name) for name, _ in dynamic_fields.items()} - return func(**func_args) - - # Adding the wrapped function as a 'run' method - setattr(dynamic_model, "run", run_method_wrapper) - return dynamic_model - - -def add_run_method_to_dynamic_model(model: type[BaseModel], func: Callable[..., Any]): - """ - Add a 'run' method to a dynamic Pydantic model, using the provided function. - - Args: - model (type[BaseModel]): Dynamic Pydantic model class. - func (Callable): Function to be added as a 'run' method to the model. - - Returns: - type[BaseModel]: Pydantic model class with the added 'run' method. - """ - - def run_method_wrapper(self): - func_args = {name: getattr(self, name) for name in model.model_fields} - return func(**func_args) - - # Adding the wrapped function as a 'run' method - setattr(model, "run", run_method_wrapper) - - return model - - -def create_dynamic_models_from_dictionaries(dictionaries: list[dict[str, Any]]): - """ - Create a list of dynamic Pydantic model classes from a list of dictionaries. - - Args: - dictionaries (list[dict]): List of dictionaries representing model structures. - - Returns: - list[type[BaseModel]]: List of generated dynamic Pydantic model classes. - """ - dynamic_models = [] - for func in dictionaries: - model_name = format_model_and_field_name(func.get("name", "")) - dyn_model = convert_dictionary_to_pydantic_model(func, model_name) - dynamic_models.append(dyn_model) - return dynamic_models - - -def map_grammar_names_to_pydantic_model_class(pydantic_model_list): - output = {} - for model in pydantic_model_list: - output[format_model_and_field_name(model.__name__)] = model - - return output - - -def json_schema_to_python_types(schema): - type_map = { - "any": Any, - "string": str, - "number": float, - "integer": int, - "boolean": bool, - "array": list, - } - return type_map[schema] - - -def list_to_enum(enum_name, values): - return Enum(enum_name, {value: value for value in values}) - - -def convert_dictionary_to_pydantic_model(dictionary: dict[str, Any], model_name: str = "CustomModel") -> type[Any]: - """ - Convert a dictionary to a Pydantic model class. - - Args: - dictionary (dict): Dictionary representing the model structure. - model_name (str): Name of the generated Pydantic model. - - Returns: - type[BaseModel]: Generated Pydantic model class. - """ - fields: dict[str, Any] = {} - - if "properties" in dictionary: - for field_name, field_data in dictionary.get("properties", {}).items(): - if field_data == "object": - submodel = convert_dictionary_to_pydantic_model(dictionary, f"{model_name}_{field_name}") - fields[field_name] = (submodel, ...) - else: - field_type = field_data.get("type", "str") - - if field_data.get("enum", []): - fields[field_name] = (list_to_enum(field_name, field_data.get("enum", [])), ...) 
- elif field_type == "array": - items = field_data.get("items", {}) - if items != {}: - array = {"properties": items} - array_type = convert_dictionary_to_pydantic_model(array, f"{model_name}_{field_name}_items") - fields[field_name] = (List[array_type], ...) - else: - fields[field_name] = (list, ...) - elif field_type == "object": - submodel = convert_dictionary_to_pydantic_model(field_data, f"{model_name}_{field_name}") - fields[field_name] = (submodel, ...) - elif field_type == "required": - required = field_data.get("enum", []) - for key, field in fields.items(): - if key not in required: - optional_type = fields[key][0] - fields[key] = (Optional[optional_type], ...) - else: - field_type = json_schema_to_python_types(field_type) - fields[field_name] = (field_type, ...) - if "function" in dictionary: - for field_name, field_data in dictionary.get("function", {}).items(): - if field_name == "name": - model_name = field_data - elif field_name == "description": - fields["__doc__"] = field_data - elif field_name == "parameters": - return convert_dictionary_to_pydantic_model(field_data, f"{model_name}") - - if "parameters" in dictionary: - field_data = {"function": dictionary} - return convert_dictionary_to_pydantic_model(field_data, f"{model_name}") - if "required" in dictionary: - required = dictionary.get("required", []) - for key, field in fields.items(): - if key not in required: - optional_type = fields[key][0] - fields[key] = (Optional[optional_type], ...) - custom_model = create_model(model_name, **fields) - return custom_model diff --git a/examples/pydantic_models_to_grammar_examples.py b/examples/pydantic_models_to_grammar_examples.py deleted file mode 100755 index eb000d5cc..000000000 --- a/examples/pydantic_models_to_grammar_examples.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/env python3 - -"""Function calling example using pydantic models.""" - -from __future__ import annotations - -import argparse -import datetime -import json -import logging -import textwrap -import sys -from enum import Enum -from typing import Optional, Union - -import requests -from pydantic import BaseModel, Field -from pydantic_models_to_grammar import (add_run_method_to_dynamic_model, convert_dictionary_to_pydantic_model, - create_dynamic_model_from_function, generate_gbnf_grammar_and_documentation) - - -def create_completion(host, prompt, gbnf_grammar): - """Calls the /completion API on llama-server. - - See - https://github.com/ggerganov/llama.cpp/tree/HEAD/examples/server#api-endpoints - """ - print(f" Request:\n Grammar:\n{textwrap.indent(gbnf_grammar, ' ')}\n Prompt:\n{textwrap.indent(prompt.rstrip(), ' ')}") - headers = {"Content-Type": "application/json"} - data = {"prompt": prompt, "grammar": gbnf_grammar} - result = requests.post(f"http://{host}/completion", headers=headers, json=data).json() - assert data.get("error") is None, data - logging.info("Result: %s", result) - content = result["content"] - print(f" Model: {result['model']}") - print(f" Result:\n{textwrap.indent(json.dumps(json.loads(content), indent=2), ' ')}") - return content - - -# A function for the agent to send a message to the user. 
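# A minimal sketch (not part of the original examples) of how the dictionary
# converter deleted above was typically driven. The "lookup_user" tool
# definition is hypothetical; only convert_dictionary_to_pydantic_model()
# comes from the removed pydantic_models_to_grammar module.
from pydantic_models_to_grammar import convert_dictionary_to_pydantic_model

lookup_tool = {
    "type": "function",
    "function": {
        "name": "lookup_user",
        "description": "Look up a user by name.",
        "parameters": {
            "type": "object",
            "properties": {"name": {"type": "string"}},
            "required": ["name"],
        },
    },
}

# The converter unwraps the "function"/"parameters" nesting and yields a
# pydantic model with a single required string field.
LookupUser = convert_dictionary_to_pydantic_model(lookup_tool)
print(LookupUser.model_fields)  # expect: {'name': FieldInfo(annotation=str, required=True)}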
-class SendMessageToUser(BaseModel): - """Send a message to the User.""" - chain_of_thought: str = Field(..., description="Your chain of thought while sending the message.") - message: str = Field(..., description="Message you want to send to the user.") - - def run(self): - print(f"SendMessageToUser: {self.message}") - - -def example_rce(host): - """Minimal test case where the LLM call an arbitrary python function.""" - print("- example_rce") - tools = [SendMessageToUser] - gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation( - pydantic_model_list=tools, outer_object_name="function", - outer_object_content="function_parameters", model_prefix="Function", fields_prefix="Parameters") - system_message = "You are an advanced AI, tasked to assist the user by calling functions in JSON format. The following are the available functions and their parameters and types:\n\n" + documentation - user_message = "What is 42 * 42?" - prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message}<|im_end|>\n<|im_start|>assistant" - text = create_completion(host, prompt, gbnf_grammar) - json_data = json.loads(text) - tools_map = {tool.__name__:tool for tool in tools} - # This finds "SendMessageToUser": - tool = tools_map.get(json_data["function"]) - if not tool: - print(f"Error: unknown tool {json_data['function']}") - return 1 - tool(**json_data["function_parameters"]).run() - return 0 - - -# Enum for the calculator tool. -class MathOperation(Enum): - ADD = "add" - SUBTRACT = "subtract" - MULTIPLY = "multiply" - DIVIDE = "divide" - - -# Simple pydantic calculator tool for the agent that can add, subtract, -# multiply, and divide. Docstring and description of fields will be used in -# system prompt. -class Calculator(BaseModel): - """Perform a math operation on two numbers.""" - number_one: Union[int, float] = Field(..., description="First number.") - operation: MathOperation = Field(..., description="Math operation to perform.") - number_two: Union[int, float] = Field(..., description="Second number.") - - def run(self): - if self.operation == MathOperation.ADD: - return self.number_one + self.number_two - elif self.operation == MathOperation.SUBTRACT: - return self.number_one - self.number_two - elif self.operation == MathOperation.MULTIPLY: - return self.number_one * self.number_two - elif self.operation == MathOperation.DIVIDE: - return self.number_one / self.number_two - else: - raise ValueError("Unknown operation.") - - -def example_calculator(host): - """Have the LLM ask to get a calculation done. - - Here the grammar gets generated by passing the available function models to - generate_gbnf_grammar_and_documentation function. This also generates a - documentation usable by the LLM. - - pydantic_model_list is the list of pydantic models outer_object_name is an - optional name for an outer object around the actual model object. Like a - "function" object with "function_parameters" which contains the actual model - object. If None, no outer object will be generated outer_object_content is - the name of outer object content. - - model_prefix is the optional prefix for models in the documentation. (Default="Output Model") - fields_prefix is the prefix for the model fields in the documentation. 
(Default="Output Fields") - """ - print("- example_calculator") - tools = [SendMessageToUser, Calculator] - gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation( - pydantic_model_list=tools, outer_object_name="function", - outer_object_content="function_parameters", model_prefix="Function", fields_prefix="Parameters") - system_message = "You are an advanced AI, tasked to assist the user by calling functions in JSON format. The following are the available functions and their parameters and types:\n\n" + documentation - user_message1 = "What is 42 * 42?" - prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{user_message1}<|im_end|>\n<|im_start|>assistant" - text = create_completion(host, prompt, gbnf_grammar) - json_data = json.loads(text) - expected = { - "function": "Calculator", - "function_parameters": { - "number_one": 42, - "operation": "multiply", - "number_two": 42 - } - } - if json_data != expected: - print(" Result is not as expected!") - tools_map = {tool.__name__:tool for tool in tools} - # This finds "Calculator": - tool = tools_map.get(json_data["function"]) - if not tool: - print(f"Error: unknown tool {json_data['function']}") - return 1 - result = tool(**json_data["function_parameters"]).run() - print(f" Call {json_data['function']} gave result {result}") - return 0 - - -class Category(Enum): - """The category of the book.""" - Fiction = "Fiction" - NonFiction = "Non-Fiction" - - -class Book(BaseModel): - """Represents an entry about a book.""" - title: str = Field(..., description="Title of the book.") - author: str = Field(..., description="Author of the book.") - published_year: Optional[int] = Field(..., description="Publishing year of the book.") - keywords: list[str] = Field(..., description="A list of keywords.") - category: Category = Field(..., description="Category of the book.") - summary: str = Field(..., description="Summary of the book.") - - -def example_struct(host): - """A example structured output based on pydantic models. - - The LLM will create an entry for a Book database out of an unstructured - text. We need no additional parameters other than our list of pydantic - models. - """ - print("- example_struct") - tools = [Book] - gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation(pydantic_model_list=tools) - system_message = "You are an advanced AI, tasked to create a dataset entry in JSON for a Book. The following is the expected output model:\n\n" + documentation - text = """The Feynman Lectures on Physics is a physics textbook based on some lectures by Richard Feynman, a Nobel laureate who has sometimes been called "The Great Explainer". The lectures were presented before undergraduate students at the California Institute of Technology (Caltech), during 1961–1963. The book's co-authors are Feynman, Robert B. Leighton, and Matthew Sands.""" - prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant" - text = create_completion(host, prompt, gbnf_grammar) - json_data = json.loads(text) - # In this case, there's no function nor function_parameters. - # Here the result will vary based on the LLM used. 
- keys = sorted(["title", "author", "published_year", "keywords", "category", "summary"]) - if keys != sorted(json_data.keys()): - print(f"Unexpected result: {sorted(json_data.keys())}") - return 1 - book = Book(**json_data) - print(f" As a Book object: %s" % book) - return 0 - - -def get_current_datetime(output_format: Optional[str] = None): - """Get the current date and time in the given format. - - Args: - output_format: formatting string for the date and time, defaults to '%Y-%m-%d %H:%M:%S' - """ - return datetime.datetime.now().strftime(output_format or "%Y-%m-%d %H:%M:%S") - - -# Example function to get the weather. -def get_current_weather(location, unit): - """Get the current weather in a given location""" - if "London" in location: - return json.dumps({"location": "London", "temperature": "42", "unit": unit.value}) - elif "New York" in location: - return json.dumps({"location": "New York", "temperature": "24", "unit": unit.value}) - elif "North Pole" in location: - return json.dumps({"location": "North Pole", "temperature": "-42", "unit": unit.value}) - return json.dumps({"location": location, "temperature": "unknown"}) - - -def example_concurrent(host): - """An example for parallel function calling with a Python function, a pydantic - function model and an OpenAI like function definition. - """ - print("- example_concurrent") - # Function definition in OpenAI style. - current_weather_tool = { - "type": "function", - "function": { - "name": "get_current_weather", - "description": "Get the current weather in a given location", - "parameters": { - "type": "object", - "properties": { - "location": { - "type": "string", - "description": "The city and state, e.g. San Francisco, CA", - }, - "unit": {"type": "string", "enum": ["celsius", "fahrenheit"]}, - }, - "required": ["location"], - }, - }, - } - # Convert OpenAI function definition into pydantic model. - current_weather_tool_model = convert_dictionary_to_pydantic_model(current_weather_tool) - # Add the actual function to a pydantic model. - current_weather_tool_model = add_run_method_to_dynamic_model(current_weather_tool_model, get_current_weather) - - # Convert normal Python function to a pydantic model. - current_datetime_model = create_dynamic_model_from_function(get_current_datetime) - - tools = [SendMessageToUser, Calculator, current_datetime_model, current_weather_tool_model] - gbnf_grammar, documentation = generate_gbnf_grammar_and_documentation( - pydantic_model_list=tools, outer_object_name="function", - outer_object_content="params", model_prefix="Function", fields_prefix="Parameters", list_of_outputs=True) - system_message = "You are an advanced AI assistant. You are interacting with the user and with your environment by calling functions. 
You call functions by writing JSON objects, which represent specific function calls.\nBelow is a list of your available function calls:\n\n" + documentation - text = """Get the date and time, get the current weather in celsius in London and solve the following calculation: 42 * 42""" - prompt = f"<|im_start|>system\n{system_message}<|im_end|>\n<|im_start|>user\n{text}<|im_end|>\n<|im_start|>assistant" - text = create_completion(host, prompt, gbnf_grammar) - json_data = json.loads(text) - expected = [ - { - "function": "get_current_datetime", - "params": { - "output_format": "%Y-%m-%d %H:%M:%S" - } - }, - { - "function": "get_current_weather", - "params": { - "location": "London", - "unit": "celsius" - } - }, - { - "function": "Calculator", - "params": { - "number_one": 42, - "operation": "multiply", - "number_two": 42 - } - } - ] - res = 0 - if json_data != expected: - print(" Result is not as expected!") - print(" This can happen on highly quantized models") - res = 1 - tools_map = {tool.__name__:tool for tool in tools} - for call in json_data: - tool = tools_map.get(call["function"]) - if not tool: - print(f"Error: unknown tool {call['function']}") - return 1 - result = tool(**call["params"]).run() - print(f" Call {call['function']} returned {result}") - # Should output something like this: - # Call get_current_datetime returned 2024-07-15 09:50:38 - # Call get_current_weather returned {"location": "London", "temperature": "42", "unit": "celsius"} - # Call Calculator returned 1764 - return res - - -def main(): - parser = argparse.ArgumentParser(description=sys.modules[__name__].__doc__) - parser.add_argument("--host", default="localhost:8080", help="llama.cpp server") - parser.add_argument("-v", "--verbose", action="store_true", help="enables logging") - args = parser.parse_args() - logging.basicConfig(level=logging.INFO if args.verbose else logging.ERROR) - ret = 0 - # Comment out below to only run the example you want. - ret = ret or example_rce(args.host) - ret = ret or example_calculator(args.host) - ret = ret or example_struct(args.host) - ret = ret or example_concurrent(args.host) - return ret - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/examples/reason-act.sh b/examples/reason-act.sh deleted file mode 100755 index 06d592799..000000000 --- a/examples/reason-act.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -cd `dirname $0` -cd .. - -# get -m model parameter otherwise defer to default -if [ "$1" == "-m" ]; then - MODEL="-m $2 " -fi - -./llama-cli $MODEL --color \ - -f ./prompts/reason-act.txt \ - -i --interactive-first \ - --top_k 10000 --temp 0.2 --repeat_penalty 1 -t 7 -c 2048 \ - -r "Question:" -r "Observation:" --in-prefix " " \ - -n -1 diff --git a/examples/regex_to_grammar.py b/examples/regex_to_grammar.py deleted file mode 100644 index 5cd9210a4..000000000 --- a/examples/regex_to_grammar.py +++ /dev/null @@ -1,20 +0,0 @@ -import json, subprocess, sys, os - -assert len(sys.argv) >= 2 -[_, pattern, *rest] = sys.argv - -print(subprocess.check_output( - [ - "python", - os.path.join( - os.path.dirname(os.path.realpath(__file__)), - "json_schema_to_grammar.py"), - *rest, - "-", - "--raw-pattern", - ], - text=True, - input=json.dumps({ - "type": "string", - "pattern": pattern, - }, indent=2))) diff --git a/examples/server-llama2-13B.sh b/examples/server-llama2-13B.sh deleted file mode 100755 index 4ce79b7fa..000000000 --- a/examples/server-llama2-13B.sh +++ /dev/null @@ -1,26 +0,0 @@ -#!/bin/bash - -set -e - -cd "$(dirname "$0")/.." 
|| exit - -# Specify the model you want to use here: -MODEL="${MODEL:-./models/llama-2-13b-chat.ggmlv3.q5_K_M.bin}" -PROMPT_TEMPLATE=${PROMPT_TEMPLATE:-./prompts/chat-system.txt} - -# Adjust to the number of CPU cores you want to use. -N_THREAD="${N_THREAD:-12}" - -# Note: you can also override the generation options by specifying them on the command line: -GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 4096 --batch-size 1024}" - - -# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS -./llama-server $GEN_OPTIONS \ - --model "$MODEL" \ - --threads "$N_THREAD" \ - --rope-freq-scale 1.0 \ - "$@" - -# I used this to test the model with mps, but omitted it from the general purpose. If you want to use it, just specify it on the command line. -# -ngl 1 \ diff --git a/examples/server_embd.py b/examples/server_embd.py deleted file mode 100644 index 0e34c6cea..000000000 --- a/examples/server_embd.py +++ /dev/null @@ -1,35 +0,0 @@ -import asyncio -import asyncio.threads -import requests -import numpy as np - - -n = 8 - -result = [] - -async def requests_post_async(*args, **kwargs): - return await asyncio.threads.to_thread(requests.post, *args, **kwargs) - -async def main(): - model_url = "http://127.0.0.1:6900" - responses: list[requests.Response] = await asyncio.gather(*[requests_post_async( - url= f"{model_url}/embedding", - json= {"content": str(0)*1024} - ) for i in range(n)]) - - for response in responses: - embedding = response.json()["embedding"] - print(embedding[-8:]) - result.append(embedding) - -asyncio.run(main()) - -# compute cosine similarity - -for i in range(n-1): - for j in range(i+1, n): - embedding1 = np.array(result[i]) - embedding2 = np.array(result[j]) - similarity = np.dot(embedding1, embedding2) / (np.linalg.norm(embedding1) * np.linalg.norm(embedding2)) - print(f"Similarity between {i} and {j}: {similarity:.2f}") diff --git a/examples/ts-type-to-grammar.sh b/examples/ts-type-to-grammar.sh deleted file mode 100755 index 9abba2a3d..000000000 --- a/examples/ts-type-to-grammar.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -# -# ./examples/ts-type-to-grammar.sh "{a:string,b:string,c?:string}" -# python examples/json_schema_to_grammar.py https://json.schemastore.org/tsconfig.json -# -set -euo pipefail - -readonly type="$1" - -# Create a temporary directory -TMPDIR="" -trap 'rm -fR "$TMPDIR"' EXIT -TMPDIR=$(mktemp -d) - -DTS_FILE="$TMPDIR/type.d.ts" -SCHEMA_FILE="$TMPDIR/schema.json" - -echo "export type MyType = $type" > "$DTS_FILE" - -# This is a fork of typescript-json-schema, actively maintained as of March 2024: -# https://github.com/vega/ts-json-schema-generator -npx ts-json-schema-generator --unstable --no-top-ref --path "$DTS_FILE" --type MyType -e none > "$SCHEMA_FILE" - -# Alternative, not actively maintained as of March 2024: -# https://github.com/YousefED/typescript-json-schema -# npx typescript-json-schema --defaultProps --required "$DTS_FILE" MyType | tee "$SCHEMA_FILE" >&2 - -./examples/json_schema_to_grammar.py "$SCHEMA_FILE" diff --git a/include/llama.h b/include/llama.h index 413070d95..ff95f6929 100644 --- a/include/llama.h +++ b/include/llama.h @@ -1163,7 +1163,7 @@ extern "C" { // Performance information LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx); - LLAMA_API void llama_print_timings(struct llama_context * ctx); + LLAMA_API void antigma_print_timings(struct llama_context * ctx); LLAMA_API void llama_reset_timings(struct llama_context * ctx); // Print system information diff --git a/src/llama.cpp b/src/llama.cpp index 
da7bcb113..8016ae981 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -19097,17 +19097,17 @@ struct llama_timings llama_get_timings(struct llama_context * ctx) { return result; } -void llama_print_timings(struct llama_context * ctx) { +void antigma_print_timings(struct llama_context * ctx) { const llama_timings timings = llama_get_timings(ctx); LLAMA_LOG_INFO("\n"); - // LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, timings.t_load_ms); - // LLAMA_LOG_INFO("%s: sample time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", - // __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample); - // LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n", - // __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval); - // LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", - // __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval); + LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, timings.t_load_ms); + LLAMA_LOG_INFO("%s: sample time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", + __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample); + LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n", + __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval); + LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", + __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval); LLAMA_LOG_INFO("Antigma timer: total time = %10.2f ms / %5d tokens\n", (timings.t_end_ms - timings.t_start_ms), (timings.n_p_eval + timings.n_eval)); }
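
The final hunk renames the public timing entry point from llama_print_timings to antigma_print_timings and re-enables the per-stage log lines, while llama_get_timings and the llama_timings struct are left untouched. A minimal caller-side sketch, assuming an already-initialized llama_context (model loading, prompt evaluation, and the rest of the usual llama.cpp setup are omitted), could look like the following; report_timings is a hypothetical helper and not part of this patch:

#include <cstdio>

#include "llama.h"

// Hypothetical helper: print a one-line summary alongside the detailed
// breakdown emitted by the renamed antigma_print_timings().
static void report_timings(struct llama_context * ctx) {
    const struct llama_timings t = llama_get_timings(ctx);  // unchanged API

    // Same fields the "Antigma timer" line in the hunk above is built from.
    const double total_ms = t.t_end_ms - t.t_start_ms;
    const int    n_tokens = t.n_p_eval + t.n_eval;

    fprintf(stderr, "summary: %.2f ms for %d tokens\n", total_ms, n_tokens);

    // Renamed entry point from this patch; prints the load / sample /
    // prompt-eval / eval timings plus the Antigma total.
    antigma_print_timings(ctx);
}

Any remaining call sites that still reference llama_print_timings (the core/ programs and the common helpers are likely candidates) would need the same one-line rename, since only the declaration in include/llama.h and the definition in src/llama.cpp are updated here.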