From e586ee42595500c53938e937b6b6ad5353ad76dc Mon Sep 17 00:00:00 2001 From: Benjamin Findley <39356821+Kartoffelsaft@users.noreply.github.com> Date: Sun, 12 May 2024 19:40:08 -0700 Subject: [PATCH 01/51] change default temperature of OAI compat API from 0 to 1 (#7226) * change default temperature of OAI compat API from 0 to 1 * make tests explicitly send temperature to OAI API --- examples/server/tests/features/steps/steps.py | 7 +++++-- examples/server/utils.hpp | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/examples/server/tests/features/steps/steps.py b/examples/server/tests/features/steps/steps.py index f4b1ac1d7..577b87af3 100644 --- a/examples/server/tests/features/steps/steps.py +++ b/examples/server/tests/features/steps/steps.py @@ -887,6 +887,7 @@ async def oai_chat_completions(user_prompt, base_path, async_client, debug=False, + temperature=None, model=None, n_predict=None, enable_streaming=None, @@ -913,7 +914,8 @@ async def oai_chat_completions(user_prompt, "model": model, "max_tokens": n_predict, "stream": enable_streaming, - "seed": seed + "temperature": temperature if temperature is not None else 0.0, + "seed": seed, } if response_format is not None: payload['response_format'] = response_format @@ -978,7 +980,8 @@ async def oai_chat_completions(user_prompt, max_tokens=n_predict, stream=enable_streaming, response_format=payload.get('response_format'), - seed=seed + seed=seed, + temperature=payload['temperature'] ) except openai.error.AuthenticationError as e: if expect_api_error is not None and expect_api_error: diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp index d872b63f5..d8a2286e4 100644 --- a/examples/server/utils.hpp +++ b/examples/server/utils.hpp @@ -371,7 +371,7 @@ static json oaicompat_completion_params_parse( llama_params["presence_penalty"] = json_value(body, "presence_penalty", 0.0); llama_params["seed"] = json_value(body, "seed", LLAMA_DEFAULT_SEED); llama_params["stream"] = json_value(body, "stream", false); - llama_params["temperature"] = json_value(body, "temperature", 0.0); + llama_params["temperature"] = json_value(body, "temperature", 1.0); llama_params["top_p"] = json_value(body, "top_p", 1.0); // Apply chat template to the list of messages From b1f8af1886e8187db6bb2a9b87cfc1c0f175f629 Mon Sep 17 00:00:00 2001 From: Brian Date: Mon, 13 May 2024 12:56:47 +1000 Subject: [PATCH 02/51] convert.py: Outfile default name change and additional metadata support (#4858) * convert.py: Outfile default name change and additional metadata support * convert.py: don't stringify Metadata load method output * convert.py: typo fix * convert.py: fix metadata format to sync with LLM_KV_NAMES in llama.cpp --- convert.py | 178 +++++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 154 insertions(+), 24 deletions(-) diff --git a/convert.py b/convert.py index 148bfd66a..e2e642351 100755 --- a/convert.py +++ b/convert.py @@ -24,7 +24,7 @@ from abc import ABC, abstractmethod from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor from dataclasses import dataclass from pathlib import Path -from typing import TYPE_CHECKING, Any, Callable, ClassVar, IO, Iterable, Literal, Protocol, TypeVar, runtime_checkable +from typing import TYPE_CHECKING, Any, Callable, ClassVar, IO, Iterable, Literal, Protocol, TypeVar, runtime_checkable, Optional import numpy as np from sentencepiece import SentencePieceProcessor @@ -344,10 +344,47 @@ class Params: return params +@dataclass +class Metadata: + name: Optional[str] = None + 
author: Optional[str] = None + version: Optional[str] = None + url: Optional[str] = None + description: Optional[str] = None + licence: Optional[str] = None + source_url: Optional[str] = None + source_hf_repo: Optional[str] = None + + @staticmethod + def load(metadata_path: Path) -> Metadata: + if metadata_path is None or not metadata_path.exists(): + return Metadata() + + with open(metadata_path, 'r') as file: + data = json.load(file) + + # Create a new Metadata instance + metadata = Metadata() + + # Assigning values to Metadata attributes if they exist in the JSON file + # This is based on LLM_KV_NAMES mapping in llama.cpp + metadata.name = data.get("general.name") + metadata.author = data.get("general.author") + metadata.version = data.get("general.version") + metadata.url = data.get("general.url") + metadata.description = data.get("general.description") + metadata.license = data.get("general.license") + metadata.source_url = data.get("general.source.url") + metadata.source_hf_repo = data.get("general.source.huggingface.repository") + + return metadata + + # # vocab # + @runtime_checkable class BaseVocab(Protocol): tokenizer_model: ClassVar[str] @@ -1066,21 +1103,42 @@ class OutputFile: def __init__(self, fname_out: Path, endianess:gguf.GGUFEndian = gguf.GGUFEndian.LITTLE): self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH], endianess=endianess) - def add_meta_arch(self, params: Params) -> None: + def add_meta_model(self, params: Params, metadata: Metadata) -> None: + # Metadata About The Model And Its Provenence name = "LLaMA" - - # TODO: better logic to determine model name - if params.n_ctx == 4096: - name = "LLaMA v2" + if metadata is not None and metadata.name is not None: + name = metadata.name elif params.path_model is not None: - name = str(params.path_model.parent).split('/')[-1] + name = str(params.path_model.parent).split("/")[-1] + elif params.n_ctx == 4096: + # Heuristic detection of LLaMA v2 model + name = "LLaMA v2" - self.gguf.add_name (name) - self.gguf.add_vocab_size (params.n_vocab) - self.gguf.add_context_length (params.n_ctx) - self.gguf.add_embedding_length (params.n_embd) - self.gguf.add_block_count (params.n_layer) - self.gguf.add_feed_forward_length (params.n_ff) + self.gguf.add_name(name) + + if metadata is not None: + if metadata.author is not None: + self.gguf.add_author(metadata.author) + if metadata.version is not None: + self.gguf.add_version(metadata.version) + if metadata.url is not None: + self.gguf.add_url(metadata.url) + if metadata.description is not None: + self.gguf.add_description(metadata.description) + if metadata.licence is not None: + self.gguf.add_licence(metadata.licence) + if metadata.source_url is not None: + self.gguf.add_source_url(metadata.source_url) + if metadata.source_hf_repo is not None: + self.gguf.add_source_hf_repo(metadata.source_hf_repo) + + def add_meta_arch(self, params: Params) -> None: + # Metadata About The Neural Architecture Itself + self.gguf.add_vocab_size(params.n_vocab) + self.gguf.add_context_length(params.n_ctx) + self.gguf.add_embedding_length(params.n_embd) + self.gguf.add_block_count(params.n_layer) + self.gguf.add_feed_forward_length(params.n_ff) self.gguf.add_rope_dimension_count(params.n_embd // params.n_head) self.gguf.add_head_count (params.n_head) self.gguf.add_head_count_kv (params.n_head_kv) @@ -1183,13 +1241,14 @@ class OutputFile: @staticmethod def write_vocab_only( fname_out: Path, params: Params, vocab: Vocab, svocab: gguf.SpecialVocab, - endianess: gguf.GGUFEndian = 
gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, + endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, metadata: Metadata = None, ) -> None: check_vocab_size(params, vocab, pad_vocab=pad_vocab) of = OutputFile(fname_out, endianess=endianess) # meta data + of.add_meta_model(params, metadata) of.add_meta_arch(params) of.add_meta_vocab(vocab) of.add_meta_special_vocab(svocab) @@ -1216,12 +1275,14 @@ class OutputFile: fname_out: Path, ftype: GGMLFileType, params: Params, model: LazyModel, vocab: BaseVocab, svocab: gguf.SpecialVocab, concurrency: int = DEFAULT_CONCURRENCY, endianess: gguf.GGUFEndian = gguf.GGUFEndian.LITTLE, pad_vocab: bool = False, + metadata: Metadata = None, ) -> None: check_vocab_size(params, vocab, pad_vocab=pad_vocab) of = OutputFile(fname_out, endianess=endianess) # meta data + of.add_meta_model(params, metadata) of.add_meta_arch(params) if isinstance(vocab, Vocab): of.add_meta_vocab(vocab) @@ -1257,6 +1318,37 @@ def pick_output_type(model: LazyModel, output_type_str: str | None) -> GGMLFileT raise ValueError(f"Unexpected combination of types: {name_to_type}") +def model_parameter_count(model: LazyModel) -> int: + total_model_parameters = 0 + for i, (name, lazy_tensor) in enumerate(model.items()): + sum_weights_in_tensor = 1 + for dim in lazy_tensor.shape: + sum_weights_in_tensor *= dim + total_model_parameters += sum_weights_in_tensor + return total_model_parameters + + +def model_parameter_count_rounded_notation(model_params_count: int) -> str: + if model_params_count > 1e12 : + # Trillions Of Parameters + scaled_model_params = model_params_count * 1e-12 + scale_suffix = "T" + elif model_params_count > 1e9 : + # Billions Of Parameters + scaled_model_params = model_params_count * 1e-9 + scale_suffix = "B" + elif model_params_count > 1e6 : + # Millions Of Parameters + scaled_model_params = model_params_count * 1e-6 + scale_suffix = "M" + else: + # Thousands Of Parameters + scaled_model_params = model_params_count * 1e-3 + scale_suffix = "K" + + return f"{round(scaled_model_params)}{scale_suffix}" + + def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel: return {name: tensor.astype(output_type.type_for_tensor(name, tensor)) for (name, tensor) in model.items()} @@ -1436,13 +1528,35 @@ class VocabFactory: return vocab, special_vocab -def default_outfile(model_paths: list[Path], file_type: GGMLFileType) -> Path: - namestr = { - GGMLFileType.AllF32: "f32", - GGMLFileType.MostlyF16: "f16", - GGMLFileType.MostlyQ8_0:"q8_0", +def default_convention_outfile(file_type: GGMLFileType, params: Params, model_params_count: int, metadata: Metadata) -> str: + quantization = { + GGMLFileType.AllF32: "F32", + GGMLFileType.MostlyF16: "F16", + GGMLFileType.MostlyQ8_0: "Q8_0", }[file_type] - ret = model_paths[0].parent / f"ggml-model-{namestr}.gguf" + + parameters = model_parameter_count_rounded_notation(model_params_count) + + expert_count = "" + if params.n_experts is not None: + expert_count = f"{params.n_experts}x" + + version = "" + if metadata is not None and metadata.version is not None: + version = f"-{metadata.version}" + + name = "ggml-model" + if metadata is not None and metadata.name is not None: + name = metadata.name + elif params.path_model is not None: + name = params.path_model.name + + return f"{name}{version}-{expert_count}{parameters}-{quantization}" + + +def default_outfile(model_paths: list[Path], file_type: GGMLFileType, params: Params, model_params_count: int, metadata: Metadata) -> Path: + default_filename = 
default_convention_outfile(file_type, params, model_params_count, metadata) + ret = model_paths[0].parent / f"{default_filename}.gguf" if ret in model_paths: logger.error( f"Error: Default output path ({ret}) would overwrite the input. " @@ -1480,17 +1594,30 @@ def main(args_in: list[str] | None = None) -> None: parser.add_argument("--pad-vocab", action="store_true", help="add pad tokens when model vocab expects more than tokenizer metadata provides") parser.add_argument("--skip-unknown", action="store_true", help="skip unknown tensor names instead of failing") parser.add_argument("--verbose", action="store_true", help="increase output verbosity") + parser.add_argument("--metadata", type=Path, help="Specify the path for a metadata file") + parser.add_argument("--get-outfile", action="store_true", help="get calculated default outfile name") args = parser.parse_args(args_in) if args.verbose: logging.basicConfig(level=logging.DEBUG) - elif args.dump_single or args.dump: + elif args.dump_single or args.dump or args.get_outfile: # Avoid printing anything besides the dump output logging.basicConfig(level=logging.WARNING) else: logging.basicConfig(level=logging.INFO) + metadata = Metadata.load(args.metadata) + + if args.get_outfile: + model_plus = load_some_model(args.model) + params = Params.load(model_plus) + model = convert_model_names(model_plus.model, params, args.skip_unknown) + model_params_count = model_parameter_count(model_plus.model) + ftype = pick_output_type(model, args.outtype) + print(f"{default_convention_outfile(ftype, params, model_params_count, metadata)}") # noqa: NP100 + return + if args.no_vocab and args.vocab_only: raise ValueError("--vocab-only does not make sense with --no-vocab") @@ -1504,6 +1631,9 @@ def main(args_in: list[str] | None = None) -> None: else: model_plus = ModelPlus(model = {}, paths = [args.model / 'dummy'], format = 'none', vocab = None) + model_params_count = model_parameter_count(model_plus.model) + logger.info(f"model parameters count : {model_params_count} ({model_parameter_count_rounded_notation(model_params_count)})") + if args.dump: do_dump_model(model_plus) return @@ -1557,7 +1687,7 @@ def main(args_in: list[str] | None = None) -> None: f_norm_eps = 1e-5, ) OutputFile.write_vocab_only(outfile, params, vocab, special_vocab, - endianess=endianess, pad_vocab=args.pad_vocab) + endianess=endianess, pad_vocab=args.pad_vocab, metadata=metadata) logger.info(f"Wrote {outfile}") return @@ -1570,13 +1700,13 @@ def main(args_in: list[str] | None = None) -> None: model = convert_model_names(model, params, args.skip_unknown) ftype = pick_output_type(model, args.outtype) model = convert_to_output_type(model, ftype) - outfile = args.outfile or default_outfile(model_plus.paths, ftype) + outfile = args.outfile or default_outfile(model_plus.paths, ftype, params, model_params_count, metadata) params.ftype = ftype logger.info(f"Writing {outfile}, format {ftype}") OutputFile.write_all(outfile, ftype, params, model, vocab, special_vocab, - concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab) + concurrency=args.concurrency, endianess=endianess, pad_vocab=args.pad_vocab, metadata=metadata) logger.info(f"Wrote {outfile}") From 9aa672490c848e45eaa704a554e0f1f6df995fc8 Mon Sep 17 00:00:00 2001 From: Joan Fontanals Date: Mon, 13 May 2024 10:35:14 +0200 Subject: [PATCH 03/51] llama : rename jina tokenizers to v2 (#7249) * refactor: rename jina tokenizers to v2 * refactor: keep refactoring non-breaking --- convert-hf-to-gguf-update.py | 6 +++--- 
convert-hf-to-gguf.py | 6 +++--- llama.cpp | 4 +++- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py index cd2674a0e..14aa0c45a 100755 --- a/convert-hf-to-gguf-update.py +++ b/convert-hf-to-gguf-update.py @@ -74,9 +74,9 @@ models = [ {"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", }, {"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", }, {"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", }, - {"name": "jina-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM! - {"name": "jina-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", }, - {"name": "jina-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", }, + {"name": "jina-v2-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM! + {"name": "jina-v2-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", }, + {"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", }, ] # make directory "models/tokenizers" if it doesn't exist diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index ec7f4dd75..d6e5dece0 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -475,13 +475,13 @@ class Model: res = "dbrx" if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f": # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en - res = "jina-en" + res = "jina-v2-en" if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643": # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es - res = "jina-es" + res = "jina-v2-es" if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6": # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de - res = "jina-de" + res = "jina-v2-de" if res is None: logger.warning("\n") diff --git a/llama.cpp b/llama.cpp index e91ad7285..adbcc07e2 100644 --- a/llama.cpp +++ b/llama.cpp @@ -4424,7 +4424,9 @@ static void llm_load_vocab( } else if ( tokenizer_pre == "gpt-2" || tokenizer_pre == "jina-es" || - tokenizer_pre == "jina-de") { + tokenizer_pre == "jina-de" || + tokenizer_pre == "jina-v2-es" || + tokenizer_pre == "jina-v2-de") { vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2; } else if ( tokenizer_pre == "refact") { From 948f4ec7c5bff92b18e63303f2b2d1645bccd943 Mon Sep 17 00:00:00 2001 From: Neo Zhang <14088817+arthw@users.noreply.github.com> Date: Mon, 13 May 2024 18:11:26 +0800 Subject: [PATCH 04/51] [SYCL] rm wait() (#7233) --- ggml-sycl.cpp | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp index e93d2af63..724070eb9 100644 --- a/ggml-sycl.cpp +++ b/ggml-sycl.cpp @@ -15564,26 +15564,6 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0, const int64_t r2 = ne12/ne02; const int64_t r3 = ne13/ne03; -#if 0 - // use syclGemmEx - { - for (int i13 = 0; i13 < ne13; ++i13) { - for (int i12 = 0; i12 < ne12; ++i12) { - int i03 = i13 / r3; - int i02 = i12 / r2; - - SYCL_CHECK( - syclGemmEx(g_sycl_handles[g_main_device], CUBLAS_OP_T, CUBLAS_OP_N, - ne01, ne11, ne10, - alpha, (const char *) src0_as_f16 + i02*src0->nb[2] + 
i03*src0->nb[3] , SYCL_R_16F, nb01/sizeof(half), - (const char *) src1_as_f16 + i12*src1->nb[2]/2 + i13*src1->nb[3]/2, SYCL_R_16F, nb11/sizeof(float), - beta, ( char *) dst_t + i12*nbd2 + i13*nbd3, cu_data_type, ne01, - cu_compute_type, - CUBLAS_GEMM_DEFAULT_TENSOR_OP)); - } - } - } -#else if (r2 == 1 && r3 == 1 && src0->nb[2]*src0->ne[2] == src0->nb[3] && src1->nb[2]*src1->ne[2] == src1->nb[3]) { // there is no broadcast and src0, src1 are contiguous across dims 2, 3 SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm_batch( @@ -15595,7 +15575,6 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0, nb11 / nb10, nb12 / nb10, beta, (char *)dst_t, cu_data_type, ne01, nb2 / nb0, ne12 * ne13, cu_compute_type))); - g_sycl_handles[g_main_device]->wait(); } else { const int ne23 = ne12*ne13; @@ -15626,7 +15605,7 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0, nb02, nb03, nb12_scaled, nb13_scaled, nbd2, nbd3, r2, r3, item_ct1); }); - }).wait(); + }); } SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm_batch( *g_sycl_handles[g_main_device], oneapi::mkl::transpose::trans, @@ -15637,9 +15616,7 @@ static void ggml_sycl_mul_mat_batched_sycl(const ggml_tensor *src0, dpct::library_data_t::real_half, nb11 / nb10, beta, (void **)(ptrs_dst.get() + 0 * ne23), cu_data_type, ne01, ne23, cu_compute_type))); - g_sycl_handles[g_main_device]->wait(); } -#endif if (no_mixed_dtypes) { const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(GGML_TYPE_F16); From 1c570d8beeebad95872dc738ea542a4a0022f78a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Mon, 13 May 2024 13:03:27 +0200 Subject: [PATCH 05/51] perplexity: add BF16 vs. FP16 results (#7150) --- examples/perplexity/README.md | 60 ++++++++++++++++++++++++++++++++++- 1 file changed, 59 insertions(+), 1 deletion(-) diff --git a/examples/perplexity/README.md b/examples/perplexity/README.md index c5e2bc5de..c2a3c5ce9 100644 --- a/examples/perplexity/README.md +++ b/examples/perplexity/README.md @@ -7,6 +7,8 @@ Also note that finetunes typically result in a higher perplexity value even thou Within llama.cpp the perplexity of base models is used primarily to judge the quality loss from e.g. quantized models vs. FP16. The convention among contributors is to use the Wikitext-2 test set for testing unless noted otherwise (can be obtained with `scripts/get-wikitext-2.sh`). +When numbers are listed all command line arguments and compilation options are left at their defaults unless noted otherwise. +llama.cpp numbers are **not** directly comparable to those of other projects because the exact values depend strongly on the implementation details. By default only the mean perplexity value and the corresponding uncertainty is calculated. The uncertainty is determined empirically by assuming a Gaussian distribution of the "correct" logits per and then applying error propagation. @@ -32,7 +34,13 @@ In addition to the KL divergence the following statistics are calculated with `- ## LLaMA 3 8b Scoreboard -Results are sorted by Kullback-Leibler divergence relative to FP16. +| Revision | f364eb6f | +|:---------|:-------------------| +| Backend | CUDA | +| CPU | AMD Epyc 7742 | +| GPU | 1x NVIDIA RTX 4090 | + +Results were generated using the CUDA backend and are sorted by Kullback-Leibler divergence relative to FP16. 
The "WT" importance matrices were created using varying numbers of Wikitext tokens and can be found [here](https://huggingface.co/JohannesGaessler/llama.cpp_importance_matrices/blob/main/imatrix-llama_3-8b-f16-2.7m_tokens.dat). | Quantization | imatrix | Model size [GiB] | PPL | ΔPPL | KLD | Mean Δp | RMS Δp | @@ -89,6 +97,12 @@ K-quants score better on mean Δp than the legacy quants than e.g. KL divergence ## LLaMA 2 vs. LLaMA 3 Quantization comparison +| Revision | f364eb6f | +|:---------|:-------------------| +| Backend | CUDA | +| CPU | AMD Epyc 7742 | +| GPU | 1x NVIDIA RTX 4090 | + | Metric | L2 7b q2_K | L3 8b q2_K | L2 7b q4_K_M | L3 8b q4_K_M | L2 7b q6_K | L3 8b q6_K | L2 7b q8_0 | L3 8b q8_0 | |-----------------|---------------------|---------------------|---------------------|---------------------|---------------------|---------------------|---------------------|---------------------| | Mean PPL | 5.794552 ± 0.032298 | 9.751568 ± 0.063312 | 5.877078 ± 0.032781 | 6.407115 ± 0.039119 | 5.808494 ± 0.032425 | 6.253382 ± 0.038078 | 5.798542 ± 0.032366 | 6.234284 ± 0.037878 | @@ -107,6 +121,50 @@ K-quants score better on mean Δp than the legacy quants than e.g. KL divergence | RMS Δp | 9.762 ± 0.053 % | 21.421 ± 0.079 % | 3.252 ± 0.024 % | 5.519 ± 0.050 % | 1.339 ± 0.010 % | 2.295 ± 0.019 % | 0.618 ± 0.011 % | 1.198 ± 0.007 % | | Same top p | 85.584 ± 0.086 % | 71.138 ± 0.119 % | 94.665 ± 0.055 % | 91.901 ± 0.072 % | 97.520 ± 0.038 % | 96.031 ± 0.051 % | 98.846 ± 0.026 % | 97.674 ± 0.040 % | +## LLaMA 3 BF16 vs. FP16 comparison + +| Revision | 83330d8c | +|:---------|:--------------| +| Backend | CPU | +| CPU | AMD Epyc 7742 | +| GPU | N/A | + +Results were calculated with LLaMA 3 8b BF16 as `--kl-divergence-base` and LLaMA 3 8b FP16 as the `--model` for comparison. 
+ +| Metric | Value | +|--------------------------------|--------------------------| +| Mean PPL(Q) | 6.227711 ± 0.037833 | +| Mean PPL(base) | 6.225194 ± 0.037771 | +| Cor(ln(PPL(Q)), ln(PPL(base))) | 99.990% | +| Mean ln(PPL(Q)/PPL(base)) | 0.000404 ± 0.000086 | +| Mean PPL(Q)/PPL(base) | 1.000404 ± 0.000086 | +| Mean PPL(Q)-PPL(base) | 0.002517 ± 0.000536 | +| Mean KLD | 0.00002515 ± 0.00000020 | +| Maximum KLD | 0.012206 | +| 99.9% KLD | 0.000799 | +| 99.0% KLD | 0.000222 | +| 99.0% KLD | 0.000222 | +| Median KLD | 0.000013 | +| 10.0% KLD | -0.000002 | +| 5.0% KLD | -0.000008 | +| 1.0% KLD | -0.000023 | +| Minimum KLD | -0.000059 | +| Mean Δp | -0.0000745 ± 0.0003952 % | +| Maximum Δp | 4.186% | +| 99.9% Δp | 1.049% | +| 99.0% Δp | 0.439% | +| 95.0% Δp | 0.207% | +| 90.0% Δp | 0.125% | +| 75.0% Δp | 0.029% | +| Median Δp | 0.000% | +| 25.0% Δp | -0.030% | +| 10.0% Δp | -0.126% | +| 5.0% Δp | -0.207% | +| 1.0% Δp | -0.434% | +| 0.1% Δp | -1.016% | +| Minimum Δp | -4.672% | +| RMS Δp | 0.150 ± 0.001 % | +| Same top p | 99.739 ± 0.013 % | ## Old Numbers From 30e70334f71b3bd115024affcf98cac3d79aaa95 Mon Sep 17 00:00:00 2001 From: "k.h.lai" Date: Mon, 13 May 2024 22:02:36 +0800 Subject: [PATCH 06/51] llava-cli: fix base64 prompt (#7248) --- examples/llava/llava-cli.cpp | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp index da60ddf2f..a6d67e5d7 100644 --- a/examples/llava/llava-cli.cpp +++ b/examples/llava/llava-cli.cpp @@ -300,14 +300,10 @@ int main(int argc, char ** argv) { return 1; } - for (auto & image : params.image) { + if (prompt_contains_image(params.prompt)) { auto ctx_llava = llava_init_context(¶ms, model); - auto image_embed = load_image(ctx_llava, ¶ms, image); - if (!image_embed) { - std::cerr << "error: failed to load image " << image << ". Terminating\n\n"; - return 1; - } + auto image_embed = load_image(ctx_llava, ¶ms, ""); // process the prompt process_prompt(ctx_llava, image_embed, ¶ms, params.prompt); @@ -316,7 +312,26 @@ int main(int argc, char ** argv) { llava_image_embed_free(image_embed); ctx_llava->model = NULL; llava_free(ctx_llava); + } else { + for (auto & image : params.image) { + auto ctx_llava = llava_init_context(¶ms, model); + + auto image_embed = load_image(ctx_llava, ¶ms, image); + if (!image_embed) { + std::cerr << "error: failed to load image " << image << ". Terminating\n\n"; + return 1; + } + + // process the prompt + process_prompt(ctx_llava, image_embed, ¶ms, params.prompt); + + llama_print_timings(ctx_llava->ctx_llama); + llava_image_embed_free(image_embed); + ctx_llava->model = NULL; + llava_free(ctx_llava); + } } + llama_free_model(model); return 0; From 614d3b914e1c3e02596f869649eb4f1d3b68614d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 13 May 2024 17:15:15 +0300 Subject: [PATCH 07/51] llama : less KV padding when FA is off (#7257) ggml-ci --- llama.cpp | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/llama.cpp b/llama.cpp index adbcc07e2..202bf94c8 100644 --- a/llama.cpp +++ b/llama.cpp @@ -2805,6 +2805,11 @@ static void llama_kv_cache_defrag(struct llama_kv_cache & cache) { cache.do_defrag = true; } +static uint32_t llama_kv_cache_get_padding(const struct llama_cparams & cparams) { + // the FA kernels require padding to avoid extra runtime boundary checks + return cparams.flash_attn ? 
256u : 32u; +} + // // model loading and saving // @@ -11510,7 +11515,8 @@ static int llama_decode_internal( // a heuristic, to avoid attending the full cache if it is not yet utilized // after enough generations, the benefit from this heuristic disappears // if we start defragmenting the cache, the benefit from this will be more important - kv_self.n = std::min(kv_self.size, std::max(256u, GGML_PAD(llama_kv_cache_cell_max(kv_self), 256))); + const uint32_t pad = llama_kv_cache_get_padding(cparams); + kv_self.n = std::min(kv_self.size, std::max(pad, GGML_PAD(llama_kv_cache_cell_max(kv_self), pad))); //kv_self.n = llama_kv_cache_cell_max(kv_self); } } @@ -15511,6 +15517,11 @@ struct llama_context * llama_new_context_with_model( return nullptr; } + if (params.flash_attn && model->arch == LLM_ARCH_GROK) { + LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__); + params.flash_attn = false; + } + llama_context * ctx = new llama_context(*model); const auto & hparams = model->hparams; @@ -15534,7 +15545,7 @@ struct llama_context * llama_new_context_with_model( cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale; // this is necessary due to kv_self.n being padded later during inference - cparams.n_ctx = GGML_PAD(cparams.n_ctx, 256); + cparams.n_ctx = GGML_PAD(cparams.n_ctx, llama_kv_cache_get_padding(cparams)); // with causal attention, the batch size is limited by the context size cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch; @@ -15579,11 +15590,6 @@ struct llama_context * llama_new_context_with_model( } } - if (cparams.flash_attn && model->arch == LLM_ARCH_GROK) { - LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__); - cparams.flash_attn = false; - } - if (params.seed == LLAMA_DEFAULT_SEED) { params.seed = time(NULL); } From ee52225067622babc277371511b8124884e1c797 Mon Sep 17 00:00:00 2001 From: compilade Date: Mon, 13 May 2024 14:10:51 -0400 Subject: [PATCH 08/51] convert-hf : support direct Q8_0 conversion (#7234) * convert-hf : support q8_0 conversion * convert-hf : add missing ftype This was messing with the checksums otherwise. * convert-hf : add missing ftype to Baichuan and Xverse I didn't notice these on my first pass. 
--- convert-hf-to-gguf.py | 72 +++++++++--------------- gguf-py/gguf/__init__.py | 1 + gguf-py/gguf/gguf_writer.py | 16 ++++-- gguf-py/gguf/lazy.py | 29 +++++++--- gguf-py/gguf/quants.py | 109 ++++++++++++++++++++++++++++++++++++ 5 files changed, 169 insertions(+), 58 deletions(-) create mode 100644 gguf-py/gguf/quants.py diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index d6e5dece0..cd875fa4a 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -240,23 +240,6 @@ class Model: return False def write_tensors(self): - # same as ggml_compute_fp32_to_bf16 in ggml-impl.h - def np_fp32_to_bf16(n: np.ndarray): - # force nan to quiet - n = np.where((n & 0x7fffffff) > 0x7f800000, (n & 0xffff0000) | (64 << 16), n) - # flush subnormals to zero - n = np.where((n & 0x7f800000) == 0, n & 0x80000000, n) - # round to nearest even - n = (n + (0x7fff + ((n >> 16) & 1))) >> 16 - return n.astype(np.int16) - - # Doing this row-wise is much, much faster than element-wise, hence the signature - v_fp32_to_bf16 = np.vectorize(np_fp32_to_bf16, otypes=[np.int16], signature="(n)->(n)") - if self.lazy: - # TODO: find a way to implicitly wrap np.vectorize functions - # NOTE: the type is changed to reflect otypes passed to np.vectorize above - v_fp32_to_bf16 = gguf.LazyNumpyTensor._wrap_fn(v_fp32_to_bf16, meta_noop=np.int16) - max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,") for name, data_torch in self.get_tensors(): @@ -309,27 +292,31 @@ class Model: )) if self.ftype != gguf.LlamaFileType.ALL_F32 and extra_f16 and not extra_f32: - if self.ftype == gguf.LlamaFileType.MOSTLY_F16: + if self.ftype == gguf.LlamaFileType.MOSTLY_BF16: + data = gguf.quantize_bf16(data) + assert data.dtype == np.int16 + data_qtype = gguf.GGMLQuantizationType.BF16 + + elif self.ftype == gguf.LlamaFileType.MOSTLY_Q8_0 and gguf.can_quantize_to_q8_0(data): + data = gguf.quantize_q8_0(data) + assert data.dtype == np.uint8 + data_qtype = gguf.GGMLQuantizationType.Q8_0 + + else: # default to float16 for quantized tensors if data_dtype != np.float16: data = data.astype(np.float16) data_qtype = gguf.GGMLQuantizationType.F16 - elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16: - if data_dtype != np.float32: - data = data.astype(np.float32) - data = v_fp32_to_bf16(data.view(np.int32)) - assert data.dtype == np.int16 - data_qtype = gguf.GGMLQuantizationType.BF16 - - else: # by default, convert to float32 + if data_qtype is None: # by default, convert to float32 if data_dtype != np.float32: data = data.astype(np.float32) data_qtype = gguf.GGMLQuantizationType.F32 - assert data_qtype is not None - + block_size, type_size = gguf.GGML_QUANT_SIZES[data_qtype] # reverse shape to make it similar to the internal ggml dimension order - shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}" + shape_str = f"""{{{', '.join(str(n) for n in reversed( + (*data.shape[:-1], data.shape[-1] * data.dtype.itemsize // type_size * block_size)) + )}}}""" # n_dims is implicit in the shape logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}") @@ -859,6 +846,7 @@ class BaichuanModel(Model): self.gguf_writer.add_head_count(head_count) self.gguf_writer.add_head_count_kv(head_count_kv) self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) + self.gguf_writer.add_file_type(self.ftype) if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: if self.hparams["rope_scaling"].get("type") == "linear": @@ 
-981,6 +969,7 @@ class XverseModel(Model): self.gguf_writer.add_head_count(head_count) self.gguf_writer.add_head_count_kv(head_count_kv) self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) + self.gguf_writer.add_file_type(self.ftype) if self.hparams.get("rope_scaling") is not None and "factor" in self.hparams["rope_scaling"]: if self.hparams["rope_scaling"].get("type") == "linear": @@ -1215,6 +1204,7 @@ class StableLMModel(Model): self.gguf_writer.add_head_count_kv(hparams["num_key_value_heads"]) self.gguf_writer.add_parallel_residual(hparams["use_parallel_residual"] if "use_parallel_residual" in hparams else True) self.gguf_writer.add_layer_norm_eps(self.find_hparam(["layer_norm_eps", "norm_eps"])) + self.gguf_writer.add_file_type(self.ftype) _q_norms: list[dict[str, Tensor]] | None = None _k_norms: list[dict[str, Tensor]] | None = None @@ -1591,6 +1581,7 @@ class QwenModel(Model): self.gguf_writer.add_rope_dimension_count(self.hparams["hidden_size"] // self.hparams["num_attention_heads"]) self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) self.gguf_writer.add_layer_norm_rms_eps(self.hparams["layer_norm_epsilon"]) + self.gguf_writer.add_file_type(self.ftype) @Model.register("Qwen2ForCausalLM") @@ -1828,6 +1819,7 @@ class PlamoModel(Model): self.gguf_writer.add_head_count(hparams["num_attention_heads"]) self.gguf_writer.add_head_count_kv(5) # hparams["num_key_value_heads"]) is wrong self.gguf_writer.add_layer_norm_rms_eps(hparams["rms_norm_eps"]) + self.gguf_writer.add_file_type(self.ftype) def shuffle_attn_q_weight(self, data_torch): assert data_torch.size() == (5120, 5120) @@ -2007,6 +1999,7 @@ in chat mode so that the conversation can end normally.") self.gguf_writer.add_head_count(self.hparams["num_attention_heads"]) self.gguf_writer.add_layer_norm_rms_eps(self.hparams["rms_norm_eps"]) self.gguf_writer.add_head_count_kv(self.hparams["num_key_value_heads"]) + self.gguf_writer.add_file_type(self.ftype) def modify_tensors(self, data_torch: Tensor, name: str, bid: int | None) -> Iterable[tuple[str, Tensor]]: num_heads = self.hparams["num_attention_heads"] @@ -2415,25 +2408,15 @@ class LazyTorchTensor(gguf.LazyBase): def numpy(self) -> gguf.LazyNumpyTensor: dtype = self._dtype_map[self.dtype] return gguf.LazyNumpyTensor( - meta=np.lib.stride_tricks.as_strided(np.zeros(1, dtype), self.shape, (0 for _ in self.shape)), + meta=gguf.LazyNumpyTensor.meta_with_dtype_and_shape(dtype, self.shape), lazy=self._lazy, args=(self,), func=(lambda s: s[0].numpy()) ) @classmethod - def eager_to_meta(cls, t: Tensor) -> Tensor: - if t.is_meta: - return t - return t.detach().to("meta") - - @classmethod - def meta_with_dtype(cls, m: Tensor, dtype: torch.dtype) -> Tensor: - m = m.detach() - if not m.is_meta: - m = m.to("meta") - m.dtype = dtype - return m + def meta_with_dtype_and_shape(cls, dtype: torch.dtype, shape: torch.Size) -> Tensor: + return torch.empty(size=shape, dtype=dtype, device="meta") @classmethod def __torch_function__(cls, func, types, args=(), kwargs=None): @@ -2464,8 +2447,8 @@ def parse_args() -> argparse.Namespace: help="path to write to; default: based on input. 
{ftype} will be replaced by the outtype.", ) parser.add_argument( - "--outtype", type=str, choices=["f32", "f16", "bf16", "auto"], default="f16", - help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type", + "--outtype", type=str, choices=["f32", "f16", "bf16", "q8_0", "auto"], default="f16", + help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, q8_0 for Q8_0, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type", ) parser.add_argument( "--bigendian", action="store_true", @@ -2523,6 +2506,7 @@ def main() -> None: "f32": gguf.LlamaFileType.ALL_F32, "f16": gguf.LlamaFileType.MOSTLY_F16, "bf16": gguf.LlamaFileType.MOSTLY_BF16, + "q8_0": gguf.LlamaFileType.MOSTLY_Q8_0, "auto": gguf.LlamaFileType.GUESSED, } diff --git a/gguf-py/gguf/__init__.py b/gguf-py/gguf/__init__.py index e5d5806c8..ea5146b16 100644 --- a/gguf-py/gguf/__init__.py +++ b/gguf-py/gguf/__init__.py @@ -2,5 +2,6 @@ from .constants import * from .lazy import * from .gguf_reader import * from .gguf_writer import * +from .quants import * from .tensor_mapping import * from .vocab import * diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py index 96574358d..d5e323a52 100644 --- a/gguf-py/gguf/gguf_writer.py +++ b/gguf-py/gguf/gguf_writer.py @@ -13,6 +13,7 @@ from string import ascii_letters, digits import numpy as np from .constants import ( + GGML_QUANT_SIZES, GGUF_DEFAULT_ALIGNMENT, GGUF_MAGIC, GGUF_VERSION, @@ -195,7 +196,7 @@ class GGUFWriter: return ((x + n - 1) // n) * n def add_tensor_info( - self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype[np.float16] | np.dtype[np.float32], + self, name: str, tensor_shape: Sequence[int], tensor_dtype: np.dtype, tensor_nbytes: int, raw_dtype: GGMLQuantizationType | None = None, ) -> None: if self.state is not WriterState.EMPTY: @@ -208,10 +209,6 @@ class GGUFWriter: encoded_name = name.encode("utf-8") self.ti_data += self._pack("Q", len(encoded_name)) self.ti_data += encoded_name - n_dims = len(tensor_shape) - self.ti_data += self._pack("I", n_dims) - for i in range(n_dims): - self.ti_data += self._pack("Q", tensor_shape[n_dims - 1 - i]) if raw_dtype is None: if tensor_dtype == np.float16: dtype = GGMLQuantizationType.F16 @@ -231,6 +228,15 @@ class GGUFWriter: raise ValueError("Only F16, F32, F64, I8, I16, I32, I64 tensors are supported for now") else: dtype = raw_dtype + if tensor_dtype == np.uint8: + block_size, type_size = GGML_QUANT_SIZES[raw_dtype] + if tensor_shape[-1] % type_size != 0: + raise ValueError(f"Quantized tensor row size ({tensor_shape[-1]}) is not a multiple of {dtype.name} type size ({type_size})") + tensor_shape = tuple(tensor_shape[:-1]) + (tensor_shape[-1] // type_size * block_size,) + n_dims = len(tensor_shape) + self.ti_data += self._pack("I", n_dims) + for i in range(n_dims): + self.ti_data += self._pack("Q", tensor_shape[n_dims - 1 - i]) self.ti_data += self._pack("I", dtype) self.ti_data += self._pack("Q", self.offset_tensor) self.offset_tensor += GGUFWriter.ggml_pad(tensor_nbytes, self.data_alignment) diff --git a/gguf-py/gguf/lazy.py b/gguf-py/gguf/lazy.py index 650bea11c..1167335b8 100644 --- a/gguf-py/gguf/lazy.py +++ b/gguf-py/gguf/lazy.py @@ -6,6 +6,7 @@ from typing import Any, Callable from collections import deque import numpy as np +from numpy._typing import _Shape from numpy.typing import DTypeLike @@ -110,7 +111,7 @@ class LazyBase(ABC, 
metaclass=LazyMeta): return o @classmethod - def _wrap_fn(cls, fn: Callable, *, use_self: LazyBase | None = None, meta_noop: bool | DTypeLike = False) -> Callable[[Any], Any]: + def _wrap_fn(cls, fn: Callable, *, use_self: LazyBase | None = None, meta_noop: bool | DTypeLike | tuple[DTypeLike, Callable[[tuple[int, ...]], tuple[int, ...]]] = False) -> Callable[[Any], Any]: def wrapped_fn(*args, **kwargs): if kwargs is None: kwargs = {} @@ -130,9 +131,14 @@ class LazyBase(ABC, metaclass=LazyMeta): res = args[0] assert isinstance(res, cls) res = res._meta - # allow operations to override the dtype + # allow operations to override the dtype and shape if meta_noop is not True: - res = cls.meta_with_dtype(res, meta_noop) + if isinstance(meta_noop, tuple): + dtype, shape = meta_noop + assert callable(shape) + res = cls.meta_with_dtype_and_shape(dtype, shape(res.shape)) + else: + res = cls.meta_with_dtype_and_shape(meta_noop, res.shape) if isinstance(res, cls._tensor_type): def collect_replace(t: LazyBase): @@ -168,7 +174,12 @@ class LazyBase(ABC, metaclass=LazyMeta): while _t._data is None: lt = _t._lazy.popleft() if lt._data is not None: - raise ValueError(f"{lt} did not belong in the lazy queue") + # Lazy tensor did not belong in the lazy queue. + # Weirdly only happens with Bloom models... + # likely because tensors aren't unique in the queue. + # The final output is still the same as in eager mode, + # so it's safe to ignore this. + continue assert lt._func is not None lt._args = cls._recurse_apply(lt._args, already_eager_to_eager) lt._data = lt._func(lt._args) @@ -183,12 +194,12 @@ class LazyBase(ABC, metaclass=LazyMeta): @classmethod def eager_to_meta(cls, t: Any) -> Any: - return cls.meta_with_dtype(t, t.dtype) + return cls.meta_with_dtype_and_shape(t.dtype, t.shape) # must be overridden, meta tensor init is backend-specific @classmethod @abstractmethod - def meta_with_dtype(cls, m: Any, dtype: Any) -> Any: pass + def meta_with_dtype_and_shape(cls, dtype: Any, shape: Any) -> Any: pass @classmethod def from_eager(cls, t: Any) -> Any: @@ -205,15 +216,15 @@ class LazyNumpyTensor(LazyBase): _tensor_type = np.ndarray @classmethod - def meta_with_dtype(cls, m: np.ndarray[Any, Any], dtype: DTypeLike) -> np.ndarray[Any, Any]: + def meta_with_dtype_and_shape(cls, dtype: DTypeLike, shape: _Shape) -> np.ndarray[Any, Any]: # The initial idea was to use np.nan as the fill value, # but non-float types like np.int16 can't use that. # So zero it is. cheat = np.zeros(1, dtype) - return np.lib.stride_tricks.as_strided(cheat, m.shape, (0 for _ in m.shape)) + return np.lib.stride_tricks.as_strided(cheat, shape, (0 for _ in shape)) def astype(self, dtype, *args, **kwargs): - meta = type(self).meta_with_dtype(self._meta, dtype) + meta = type(self).meta_with_dtype_and_shape(dtype, self._meta.shape) full_args = (self, dtype,) + args # very important to pass the shared _lazy deque, or else there's an infinite loop somewhere. 
return type(self)(meta=meta, args=full_args, lazy=self._lazy, func=(lambda a: a[0].astype(*a[1:], **kwargs))) diff --git a/gguf-py/gguf/quants.py b/gguf-py/gguf/quants.py new file mode 100644 index 000000000..e7fc0eae3 --- /dev/null +++ b/gguf-py/gguf/quants.py @@ -0,0 +1,109 @@ +from __future__ import annotations +from typing import Callable + +from numpy.typing import DTypeLike + +from .constants import GGML_QUANT_SIZES, GGMLQuantizationType +from .lazy import LazyNumpyTensor + +import numpy as np + + +# same as ggml_compute_fp32_to_bf16 in ggml-impl.h +def __compute_fp32_to_bf16(n: np.ndarray) -> np.ndarray: + n = n.astype(np.float32, copy=False).view(np.int32) + # force nan to quiet + n = np.where((n & 0x7fffffff) > 0x7f800000, (n & 0xffff0000) | (64 << 16), n) + # flush subnormals to zero + n = np.where((n & 0x7f800000) == 0, n & 0x80000000, n) + # round to nearest even + n = (n + (0x7fff + ((n >> 16) & 1))) >> 16 + return n.astype(np.int16) + + +# This is faster than np.vectorize and np.apply_along_axis because it works on more than one row at a time +def __apply_over_grouped_rows(func: Callable[[np.ndarray], np.ndarray], arr: np.ndarray, otype: DTypeLike, oshape: tuple[int, ...]) -> np.ndarray: + rows = arr.reshape((-1, arr.shape[-1])) + osize = 1 + for dim in oshape: + osize *= dim + out = np.empty(shape=osize, dtype=otype) + # compute over groups of 16 rows (arbitrary, but seems good for performance) + n_groups = rows.shape[0] // 16 + np.concatenate([func(group).ravel() for group in np.array_split(rows, n_groups)], axis=0, out=out) + return out.reshape(oshape) + + +def __quantize_bf16_array(n: np.ndarray) -> np.ndarray: + return __apply_over_grouped_rows(__compute_fp32_to_bf16, arr=n, otype=np.int16, oshape=n.shape) + + +__quantize_bf16_lazy = LazyNumpyTensor._wrap_fn(__quantize_bf16_array, meta_noop=np.int16) + + +def quantize_bf16(n: np.ndarray): + if type(n) is LazyNumpyTensor: + return __quantize_bf16_lazy(n) + else: + return __quantize_bf16_array(n) + + +__q8_block_size, __q8_type_size = GGML_QUANT_SIZES[GGMLQuantizationType.Q8_0] + + +def can_quantize_to_q8_0(n: np.ndarray) -> bool: + return n.shape[-1] % __q8_block_size == 0 + + +# round away from zero +# ref: https://stackoverflow.com/a/59143326/22827863 +def np_roundf(n: np.ndarray) -> np.ndarray: + a = abs(n) + floored = np.floor(a) + b = floored + np.floor(2 * (a - floored)) + return np.sign(n) * b + + +def __quantize_q8_0_shape_change(s: tuple[int, ...]) -> tuple[int, ...]: + return (*s[:-1], s[-1] // __q8_block_size * __q8_type_size) + + +# Implementation of Q8_0 with bit-exact same results as reference implementation in ggml-quants.c +def __quantize_q8_0_rows(n: np.ndarray) -> np.ndarray: + shape = n.shape + assert shape[-1] % __q8_block_size == 0 + + n_blocks = n.size // __q8_block_size + + blocks = n.reshape((n_blocks, __q8_block_size)).astype(np.float32, copy=False) + + d = abs(blocks).max(axis=1, keepdims=True) / 127 + with np.errstate(divide="ignore"): + id = np.where(d == 0, 0, 1 / d) + qs = np_roundf(blocks * id) + + # (n_blocks, 2) + d = d.astype(np.float16).view(np.uint8) + # (n_blocks, block_size) + qs = qs.astype(np.int8).view(np.uint8) + + assert d.shape[1] + qs.shape[1] == __q8_type_size + + return np.concatenate([d, qs], axis=1).reshape(__quantize_q8_0_shape_change(shape)) + + +def __quantize_q8_0_array(n: np.ndarray) -> np.ndarray: + return __apply_over_grouped_rows(__quantize_q8_0_rows, arr=n, otype=np.uint8, oshape=__quantize_q8_0_shape_change(n.shape)) + + +__quantize_q8_0_lazy = 
LazyNumpyTensor._wrap_fn( + __quantize_q8_0_array, + meta_noop=(np.uint8, __quantize_q8_0_shape_change), +) + + +def quantize_q8_0(data: np.ndarray): + if type(data) is LazyNumpyTensor: + return __quantize_q8_0_lazy(data) + else: + return __quantize_q8_0_array(data) From 27f65d6267cf22a44c5ccefa7765d53a05bd1259 Mon Sep 17 00:00:00 2001 From: Ryuei Date: Tue, 14 May 2024 14:20:47 +0900 Subject: [PATCH 09/51] docs: Fix typo and update description for --embeddings flag (#7026) - Change '--embedding' to '--embeddings' in the README - Update the description to match the latest --help output - Added a caution about defining physical batch size --- examples/server/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/server/README.md b/examples/server/README.md index 650317991..f6eb6942c 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -48,7 +48,7 @@ page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/ - `--path`: Path from which to serve static files. Default: disabled - `--api-key`: Set an api key for request authorization. By default, the server responds to every request. With an api key set, the requests must have the Authorization header set with the api key as Bearer token. May be used multiple times to enable multiple valid keys. - `--api-key-file`: Path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access. May be used in conjunction with `--api-key`s. -- `--embedding`: Enable embedding extraction. Default: disabled +- `--embeddings`: Enable embedding vector output and the OAI compatible endpoint /v1/embeddings. Physical batch size (`--ubatch-size`) must be carefully defined. Default: disabled - `-np N`, `--parallel N`: Set the number of slots for process requests. Default: `1` - `-cb`, `--cont-batching`: Enable continuous batching (a.k.a dynamic batching). Default: disabled - `-spf FNAME`, `--system-prompt-file FNAME` Set a file to load a system prompt (initial prompt of all slots). This is useful for chat applications. [See more](#change-system-prompt-on-runtime) From e0f556186b6e1f2b7032a1479edf5e89e2b1bd86 Mon Sep 17 00:00:00 2001 From: Haggai Nuchi Date: Mon, 13 May 2024 22:25:56 -0700 Subject: [PATCH 10/51] Add left recursion check: quit early instead of going into an infinite loop (#7083) * Add left recursion check: quit early instead of going into an infinite loop * Remove custom enum, rename left recursion check and move to "grammar internal" section, add handling for edge case where a leftmost nonterminal may be empty * Remove unnecessary declaration --- llama.cpp | 68 ++++++++++++++++++++++++++++++ tests/test-grammar-integration.cpp | 46 ++++++++++++++++++++ 2 files changed, 114 insertions(+) diff --git a/llama.cpp b/llama.cpp index 202bf94c8..01a35dfb6 100644 --- a/llama.cpp +++ b/llama.cpp @@ -13182,6 +13182,58 @@ static std::vector llama_grammar_reject_candidates( return rejects; } +static bool llama_grammar_detect_left_recursion( + const std::vector> & rules, + size_t rule_index, + std::vector * rules_visited, + std::vector * rules_in_progress, + std::vector * rules_may_be_empty) { + if ((*rules_in_progress)[rule_index]) { + return true; + } + + (*rules_in_progress)[rule_index] = true; + + const std::vector & rule = rules[rule_index]; + + // First check if the rule might produce the empty string. This could be done combined with the second + // step but it's more readable as two steps. 
+ bool at_rule_start = true; + for (size_t i = 0; i < rule.size(); i++) { + if (llama_grammar_is_end_of_sequence(&rule[i])) { + if (at_rule_start) { + (*rules_may_be_empty)[rule_index] = true; + break; + } + at_rule_start = true; + } else { + at_rule_start = false; + } + } + + // Second, recurse into leftmost nonterminals (or next-leftmost as long as the previous nonterminal may + // be empty) + bool recurse_into_nonterminal = true; + for (size_t i = 0; i < rule.size(); i++) { + if (rule[i].type == LLAMA_GRETYPE_RULE_REF && recurse_into_nonterminal) { + if (llama_grammar_detect_left_recursion(rules, (size_t)rule[i].value, rules_visited, rules_in_progress, rules_may_be_empty)) { + return true; + } + if (!((*rules_may_be_empty)[(size_t)rule[i].value])) { + recurse_into_nonterminal = false; + } + } else if (llama_grammar_is_end_of_sequence(&rule[i])) { + recurse_into_nonterminal = true; + } else { + recurse_into_nonterminal = false; + } + } + + (*rules_in_progress)[rule_index] = false; + (*rules_visited)[rule_index] = true; + return false; +} + // // grammar - external // @@ -13201,6 +13253,19 @@ struct llama_grammar * llama_grammar_init( vec_rules[i].push_back({LLAMA_GRETYPE_END, 0}); } + // Check for left recursion + std::vector rules_visited(n_rules); + std::vector rules_in_progress(n_rules); + std::vector rules_may_be_empty(n_rules); + for (size_t i = 0; i < n_rules; i++) { + if (rules_visited[i]) { + continue; + } + if (llama_grammar_detect_left_recursion(vec_rules, i, &rules_visited, &rules_in_progress, &rules_may_be_empty)) { + throw std::runtime_error(format("unsupported grammar, left recursion detected for nonterminal at index %zu", i)); + } + } + // loop over alternates of start rule to build initial stacks std::vector> stacks; pos = vec_rules[start_rule_index].data(); @@ -13223,6 +13288,9 @@ struct llama_grammar * llama_grammar_init( } } while (true); + // Important: vec_rules has to be moved here, not copied, because stacks contains + // pointers to elements of vec_rules. If vec_rules were copied into llama_grammar + // then the pointers would be invalidated when the local vec_rules goes out of scope. 
return new llama_grammar{ std::move(vec_rules), std::move(stacks), {} }; } diff --git a/tests/test-grammar-integration.cpp b/tests/test-grammar-integration.cpp index 1a4004e2a..01c5bb27a 100644 --- a/tests/test-grammar-integration.cpp +++ b/tests/test-grammar-integration.cpp @@ -28,6 +28,19 @@ static llama_grammar* build_grammar(const std::string & grammar_str) { return grammar; } +static bool test_build_grammar_fails(const std::string & grammar_str) { + fprintf(stderr, "⚫ Testing failure for grammar: %s\n", grammar_str.c_str()); + bool grammar_fails = false; + try { + build_grammar(grammar_str); + fprintf(stderr, " ❌ Expected build failure, but succeeded\n"); + } catch (const std::exception & err) { + grammar_fails = true; + fprintf(stdout, " ✅︎\n"); + } + return grammar_fails; +} + static bool match_string(const std::string & input, llama_grammar* grammar) { auto decoded = decode_utf8(input, {}); @@ -320,6 +333,38 @@ number ::= [0-9]+)"""; fprintf(stderr, " ✅︎ Passed\n"); } +static void test_failure_left_recursion() { + fprintf(stderr, "⚫ Testing left recursion detection:\n"); + + // Test simple left recursion detection + const std::string simple_str = R"""(root ::= "a" | root "a")"""; + assert(test_build_grammar_fails(simple_str)); + + // Test more complicated left recursion detection + const std::string medium_str = R"""( +root ::= asdf +asdf ::= "a" | asdf "a" +)"""; + assert(test_build_grammar_fails(medium_str)); + + // Test even more complicated left recursion detection + const std::string hard_str = R"""( +root ::= asdf +asdf ::= "a" | foo "b" +foo ::= "c" | asdf "d" | "e")"""; + assert(test_build_grammar_fails(hard_str)); + + // Test yet even more complicated left recursion detection + const std::string hardest_str = R"""( +root ::= asdf +asdf ::= "a" | foo "b" +foo ::= "c" | empty asdf "d" | "e" +empty ::= "blah" | )"""; + assert(test_build_grammar_fails(hardest_str)); + + fprintf(stderr, " ✅︎ Passed\n"); +} + int main() { fprintf(stdout, "Running grammar integration tests...\n"); test_simple_grammar(); @@ -327,6 +372,7 @@ int main() { test_quantifiers(); test_failure_missing_root(); test_failure_missing_reference(); + test_failure_left_recursion(); fprintf(stdout, "All tests passed.\n"); return 0; } From efc8f767c8c8c749a245dd96ad4e2f37c164b54c Mon Sep 17 00:00:00 2001 From: Elton Kola Date: Tue, 14 May 2024 03:30:30 -0400 Subject: [PATCH 11/51] move ndk code to a new library (#6951) --- examples/llama.android/app/build.gradle.kts | 25 +--------- .../java/com/example/llama/MainViewModel.kt | 13 ++--- examples/llama.android/build.gradle.kts | 1 + examples/llama.android/llama/.gitignore | 1 + .../src/main/cpp => llama}/CMakeLists.txt | 2 +- .../llama.android/llama/consumer-rules.pro | 0 .../llama.android/llama/proguard-rules.pro | 21 ++++++++ .../llama/cpp/ExampleInstrumentedTest.kt | 24 +++++++++ .../llama/src/main/AndroidManifest.xml | 4 ++ .../llama/src/main/cpp/CMakeLists.txt | 49 +++++++++++++++++++ .../src/main/cpp/llama-android.cpp | 28 +++++------ .../java/android/llama/cpp/LLamaAndroid.kt} | 8 +-- .../java/android/llama/cpp/ExampleUnitTest.kt | 17 +++++++ examples/llama.android/settings.gradle.kts | 1 + 14 files changed, 145 insertions(+), 49 deletions(-) create mode 100644 examples/llama.android/llama/.gitignore rename examples/llama.android/{app/src/main/cpp => llama}/CMakeLists.txt (98%) create mode 100644 examples/llama.android/llama/consumer-rules.pro create mode 100644 examples/llama.android/llama/proguard-rules.pro create mode 100644 
examples/llama.android/llama/src/androidTest/java/android/llama/cpp/ExampleInstrumentedTest.kt create mode 100644 examples/llama.android/llama/src/main/AndroidManifest.xml create mode 100644 examples/llama.android/llama/src/main/cpp/CMakeLists.txt rename examples/llama.android/{app => llama}/src/main/cpp/llama-android.cpp (92%) rename examples/llama.android/{app/src/main/java/com/example/llama/Llm.kt => llama/src/main/java/android/llama/cpp/LLamaAndroid.kt} (97%) create mode 100644 examples/llama.android/llama/src/test/java/android/llama/cpp/ExampleUnitTest.kt diff --git a/examples/llama.android/app/build.gradle.kts b/examples/llama.android/app/build.gradle.kts index d42140efe..8d1b37195 100644 --- a/examples/llama.android/app/build.gradle.kts +++ b/examples/llama.android/app/build.gradle.kts @@ -7,8 +7,6 @@ android { namespace = "com.example.llama" compileSdk = 34 - ndkVersion = "26.1.10909125" - defaultConfig { applicationId = "com.example.llama" minSdk = 33 @@ -20,17 +18,6 @@ android { vectorDrawables { useSupportLibrary = true } - ndk { - // Add NDK properties if wanted, e.g. - // abiFilters += listOf("arm64-v8a") - } - externalNativeBuild { - cmake { - arguments += "-DCMAKE_BUILD_TYPE=Release" - cppFlags += listOf() - arguments += listOf() - } - } } buildTypes { @@ -55,17 +42,6 @@ android { composeOptions { kotlinCompilerExtensionVersion = "1.5.1" } - packaging { - resources { - excludes += "/META-INF/{AL2.0,LGPL2.1}" - } - } - externalNativeBuild { - cmake { - path = file("src/main/cpp/CMakeLists.txt") - version = "3.22.1" - } - } } dependencies { @@ -78,6 +54,7 @@ dependencies { implementation("androidx.compose.ui:ui-graphics") implementation("androidx.compose.ui:ui-tooling-preview") implementation("androidx.compose.material3:material3") + implementation(project(":llama")) testImplementation("junit:junit:4.13.2") androidTestImplementation("androidx.test.ext:junit:1.1.5") androidTestImplementation("androidx.test.espresso:espresso-core:3.5.1") diff --git a/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt b/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt index be95e2221..45ac29938 100644 --- a/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt +++ b/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt @@ -1,5 +1,6 @@ package com.example.llama +import android.llama.cpp.LLamaAndroid import android.util.Log import androidx.compose.runtime.getValue import androidx.compose.runtime.mutableStateOf @@ -9,7 +10,7 @@ import androidx.lifecycle.viewModelScope import kotlinx.coroutines.flow.catch import kotlinx.coroutines.launch -class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() { +class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instance()): ViewModel() { companion object { @JvmStatic private val NanosPerSecond = 1_000_000_000.0 @@ -28,7 +29,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() { viewModelScope.launch { try { - llm.unload() + llamaAndroid.unload() } catch (exc: IllegalStateException) { messages += exc.message!! } @@ -44,7 +45,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() { messages += "" viewModelScope.launch { - llm.send(text) + llamaAndroid.send(text) .catch { Log.e(tag, "send() failed", it) messages += it.message!! 
@@ -57,7 +58,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() { viewModelScope.launch { try { val start = System.nanoTime() - val warmupResult = llm.bench(pp, tg, pl, nr) + val warmupResult = llamaAndroid.bench(pp, tg, pl, nr) val end = System.nanoTime() messages += warmupResult @@ -70,7 +71,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() { return@launch } - messages += llm.bench(512, 128, 1, 3) + messages += llamaAndroid.bench(512, 128, 1, 3) } catch (exc: IllegalStateException) { Log.e(tag, "bench() failed", exc) messages += exc.message!! @@ -81,7 +82,7 @@ class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() { fun load(pathToModel: String) { viewModelScope.launch { try { - llm.load(pathToModel) + llamaAndroid.load(pathToModel) messages += "Loaded $pathToModel" } catch (exc: IllegalStateException) { Log.e(tag, "load() failed", exc) diff --git a/examples/llama.android/build.gradle.kts b/examples/llama.android/build.gradle.kts index 50ebc8211..acd1ada7d 100644 --- a/examples/llama.android/build.gradle.kts +++ b/examples/llama.android/build.gradle.kts @@ -2,4 +2,5 @@ plugins { id("com.android.application") version "8.2.0" apply false id("org.jetbrains.kotlin.android") version "1.9.0" apply false + id("com.android.library") version "8.2.0" apply false } diff --git a/examples/llama.android/llama/.gitignore b/examples/llama.android/llama/.gitignore new file mode 100644 index 000000000..796b96d1c --- /dev/null +++ b/examples/llama.android/llama/.gitignore @@ -0,0 +1 @@ +/build diff --git a/examples/llama.android/app/src/main/cpp/CMakeLists.txt b/examples/llama.android/llama/CMakeLists.txt similarity index 98% rename from examples/llama.android/app/src/main/cpp/CMakeLists.txt rename to examples/llama.android/llama/CMakeLists.txt index 85139329a..bb5738ae3 100644 --- a/examples/llama.android/app/src/main/cpp/CMakeLists.txt +++ b/examples/llama.android/llama/CMakeLists.txt @@ -37,7 +37,7 @@ FetchContent_MakeAvailable(llama) # used in the AndroidManifest.xml file. add_library(${CMAKE_PROJECT_NAME} SHARED # List C/C++ source files with relative paths to this CMakeLists.txt. - llama-android.cpp) + llama-android.cpp) # Specifies libraries CMake should link to your target library. You # can link libraries from various origins, such as libraries defined in this diff --git a/examples/llama.android/llama/consumer-rules.pro b/examples/llama.android/llama/consumer-rules.pro new file mode 100644 index 000000000..e69de29bb diff --git a/examples/llama.android/llama/proguard-rules.pro b/examples/llama.android/llama/proguard-rules.pro new file mode 100644 index 000000000..f1b424510 --- /dev/null +++ b/examples/llama.android/llama/proguard-rules.pro @@ -0,0 +1,21 @@ +# Add project specific ProGuard rules here. +# You can control the set of applied configuration files using the +# proguardFiles setting in build.gradle. +# +# For more details, see +# http://developer.android.com/guide/developing/tools/proguard.html + +# If your project uses WebView with JS, uncomment the following +# and specify the fully qualified class name to the JavaScript interface +# class: +#-keepclassmembers class fqcn.of.javascript.interface.for.webview { +# public *; +#} + +# Uncomment this to preserve the line number information for +# debugging stack traces. +#-keepattributes SourceFile,LineNumberTable + +# If you keep the line number information, uncomment this to +# hide the original source file name. 
+#-renamesourcefileattribute SourceFile diff --git a/examples/llama.android/llama/src/androidTest/java/android/llama/cpp/ExampleInstrumentedTest.kt b/examples/llama.android/llama/src/androidTest/java/android/llama/cpp/ExampleInstrumentedTest.kt new file mode 100644 index 000000000..05d6ab5d2 --- /dev/null +++ b/examples/llama.android/llama/src/androidTest/java/android/llama/cpp/ExampleInstrumentedTest.kt @@ -0,0 +1,24 @@ +package android.llama.cpp + +import androidx.test.platform.app.InstrumentationRegistry +import androidx.test.ext.junit.runners.AndroidJUnit4 + +import org.junit.Test +import org.junit.runner.RunWith + +import org.junit.Assert.* + +/** + * Instrumented test, which will execute on an Android device. + * + * See [testing documentation](http://d.android.com/tools/testing). + */ +@RunWith(AndroidJUnit4::class) +class ExampleInstrumentedTest { + @Test + fun useAppContext() { + // Context of the app under test. + val appContext = InstrumentationRegistry.getInstrumentation().targetContext + assertEquals("android.llama.cpp.test", appContext.packageName) + } +} diff --git a/examples/llama.android/llama/src/main/AndroidManifest.xml b/examples/llama.android/llama/src/main/AndroidManifest.xml new file mode 100644 index 000000000..8bdb7e14b --- /dev/null +++ b/examples/llama.android/llama/src/main/AndroidManifest.xml @@ -0,0 +1,4 @@ + + + + diff --git a/examples/llama.android/llama/src/main/cpp/CMakeLists.txt b/examples/llama.android/llama/src/main/cpp/CMakeLists.txt new file mode 100644 index 000000000..42ebaad49 --- /dev/null +++ b/examples/llama.android/llama/src/main/cpp/CMakeLists.txt @@ -0,0 +1,49 @@ +# For more information about using CMake with Android Studio, read the +# documentation: https://d.android.com/studio/projects/add-native-code.html. +# For more examples on how to use CMake, see https://github.com/android/ndk-samples. + +# Sets the minimum CMake version required for this project. +cmake_minimum_required(VERSION 3.22.1) + +# Declares the project name. The project name can be accessed via ${ PROJECT_NAME}, +# Since this is the top level CMakeLists.txt, the project name is also accessible +# with ${CMAKE_PROJECT_NAME} (both CMake variables are in-sync within the top level +# build script scope). +project("llama-android") + +include(FetchContent) +FetchContent_Declare( + llama + GIT_REPOSITORY https://github.com/ggerganov/llama.cpp + GIT_TAG master +) + +# Also provides "common" +FetchContent_MakeAvailable(llama) + +# Creates and names a library, sets it as either STATIC +# or SHARED, and provides the relative paths to its source code. +# You can define multiple libraries, and CMake builds them for you. +# Gradle automatically packages shared libraries with your APK. +# +# In this top level CMakeLists.txt, ${CMAKE_PROJECT_NAME} is used to define +# the target library name; in the sub-module's CMakeLists.txt, ${PROJECT_NAME} +# is preferred for the same purpose. +# +# In order to load a library into your app from Java/Kotlin, you must call +# System.loadLibrary() and pass the name of the library defined here; +# for GameActivity/NativeActivity derived applications, the same library name must be +# used in the AndroidManifest.xml file. +add_library(${CMAKE_PROJECT_NAME} SHARED + # List C/C++ source files with relative paths to this CMakeLists.txt. + llama-android.cpp) + +# Specifies libraries CMake should link to your target library. 
You +# can link libraries from various origins, such as libraries defined in this +# build script, prebuilt third-party libraries, or Android system libraries. +target_link_libraries(${CMAKE_PROJECT_NAME} + # List libraries link to the target library + llama + common + android + log) diff --git a/examples/llama.android/app/src/main/cpp/llama-android.cpp b/examples/llama.android/llama/src/main/cpp/llama-android.cpp similarity index 92% rename from examples/llama.android/app/src/main/cpp/llama-android.cpp rename to examples/llama.android/llama/src/main/cpp/llama-android.cpp index 4af9de303..874158ef0 100644 --- a/examples/llama.android/app/src/main/cpp/llama-android.cpp +++ b/examples/llama.android/llama/src/main/cpp/llama-android.cpp @@ -81,7 +81,7 @@ static void log_callback(ggml_log_level level, const char * fmt, void * data) { extern "C" JNIEXPORT jlong JNICALL -Java_com_example_llama_Llm_load_1model(JNIEnv *env, jobject, jstring filename) { +Java_android_llama_cpp_LLamaAndroid_load_1model(JNIEnv *env, jobject, jstring filename) { llama_model_params model_params = llama_model_default_params(); auto path_to_model = env->GetStringUTFChars(filename, 0); @@ -101,13 +101,13 @@ Java_com_example_llama_Llm_load_1model(JNIEnv *env, jobject, jstring filename) { extern "C" JNIEXPORT void JNICALL -Java_com_example_llama_Llm_free_1model(JNIEnv *, jobject, jlong model) { +Java_android_llama_cpp_LLamaAndroid_free_1model(JNIEnv *, jobject, jlong model) { llama_free_model(reinterpret_cast(model)); } extern "C" JNIEXPORT jlong JNICALL -Java_com_example_llama_Llm_new_1context(JNIEnv *env, jobject, jlong jmodel) { +Java_android_llama_cpp_LLamaAndroid_new_1context(JNIEnv *env, jobject, jlong jmodel) { auto model = reinterpret_cast(jmodel); if (!model) { @@ -139,25 +139,25 @@ Java_com_example_llama_Llm_new_1context(JNIEnv *env, jobject, jlong jmodel) { extern "C" JNIEXPORT void JNICALL -Java_com_example_llama_Llm_free_1context(JNIEnv *, jobject, jlong context) { +Java_android_llama_cpp_LLamaAndroid_free_1context(JNIEnv *, jobject, jlong context) { llama_free(reinterpret_cast(context)); } extern "C" JNIEXPORT void JNICALL -Java_com_example_llama_Llm_backend_1free(JNIEnv *, jobject) { +Java_android_llama_cpp_LLamaAndroid_backend_1free(JNIEnv *, jobject) { llama_backend_free(); } extern "C" JNIEXPORT void JNICALL -Java_com_example_llama_Llm_log_1to_1android(JNIEnv *, jobject) { +Java_android_llama_cpp_LLamaAndroid_log_1to_1android(JNIEnv *, jobject) { llama_log_set(log_callback, NULL); } extern "C" JNIEXPORT jstring JNICALL -Java_com_example_llama_Llm_bench_1model( +Java_android_llama_cpp_LLamaAndroid_bench_1model( JNIEnv *env, jobject, jlong context_pointer, @@ -271,13 +271,13 @@ Java_com_example_llama_Llm_bench_1model( extern "C" JNIEXPORT void JNICALL -Java_com_example_llama_Llm_free_1batch(JNIEnv *, jobject, jlong batch_pointer) { +Java_android_llama_cpp_LLamaAndroid_free_1batch(JNIEnv *, jobject, jlong batch_pointer) { llama_batch_free(*reinterpret_cast(batch_pointer)); } extern "C" JNIEXPORT jlong JNICALL -Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint embd, jint n_seq_max) { +Java_android_llama_cpp_LLamaAndroid_new_1batch(JNIEnv *, jobject, jint n_tokens, jint embd, jint n_seq_max) { // Source: Copy of llama.cpp:llama_batch_init but heap-allocated. 
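
The block of renames above is mechanical: JNI derives the exported C symbol from the fully qualified Kotlin/Java class name, so moving the class from `com.example.llama.Llm` to `android.llama.cpp.LLamaAndroid` forces every `Java_..._*` symbol to change. A minimal sketch of the rule, not part of the patch; the `do_work` method is hypothetical and only illustrates the `_1` escaping of a literal underscore:

```cpp
// Hypothetical example of the JNI naming rule behind the renames above.
// For package android.llama.cpp, class LLamaAndroid, external fun do_work(),
// JNI expects "Java_" + package/class with '.' replaced by '_' + "_" + method,
// where a literal '_' inside a Java-side name is escaped as "_1".
#include <jni.h>

extern "C" JNIEXPORT jlong JNICALL
Java_android_llama_cpp_LLamaAndroid_do_1work(JNIEnv * env, jobject /*thiz*/) {
    (void) env;
    return 0; // stub; the real bindings live in llama-android.cpp above
}
```
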
@@ -313,19 +313,19 @@ Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint emb extern "C" JNIEXPORT void JNICALL -Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject) { +Java_android_llama_cpp_LLamaAndroid_backend_1init(JNIEnv *, jobject) { llama_backend_init(); } extern "C" JNIEXPORT jstring JNICALL -Java_com_example_llama_Llm_system_1info(JNIEnv *env, jobject) { +Java_android_llama_cpp_LLamaAndroid_system_1info(JNIEnv *env, jobject) { return env->NewStringUTF(llama_print_system_info()); } extern "C" JNIEXPORT jint JNICALL -Java_com_example_llama_Llm_completion_1init( +Java_android_llama_cpp_LLamaAndroid_completion_1init( JNIEnv *env, jobject, jlong context_pointer, @@ -376,7 +376,7 @@ Java_com_example_llama_Llm_completion_1init( extern "C" JNIEXPORT jstring JNICALL -Java_com_example_llama_Llm_completion_1loop( +Java_android_llama_cpp_LLamaAndroid_completion_1loop( JNIEnv * env, jobject, jlong context_pointer, @@ -438,6 +438,6 @@ Java_com_example_llama_Llm_completion_1loop( extern "C" JNIEXPORT void JNICALL -Java_com_example_llama_Llm_kv_1cache_1clear(JNIEnv *, jobject, jlong context) { +Java_android_llama_cpp_LLamaAndroid_kv_1cache_1clear(JNIEnv *, jobject, jlong context) { llama_kv_cache_clear(reinterpret_cast(context)); } diff --git a/examples/llama.android/app/src/main/java/com/example/llama/Llm.kt b/examples/llama.android/llama/src/main/java/android/llama/cpp/LLamaAndroid.kt similarity index 97% rename from examples/llama.android/app/src/main/java/com/example/llama/Llm.kt rename to examples/llama.android/llama/src/main/java/android/llama/cpp/LLamaAndroid.kt index d86afee37..6c63e54e0 100644 --- a/examples/llama.android/app/src/main/java/com/example/llama/Llm.kt +++ b/examples/llama.android/llama/src/main/java/android/llama/cpp/LLamaAndroid.kt @@ -1,4 +1,4 @@ -package com.example.llama +package android.llama.cpp import android.util.Log import kotlinx.coroutines.CoroutineDispatcher @@ -10,7 +10,7 @@ import kotlinx.coroutines.withContext import java.util.concurrent.Executors import kotlin.concurrent.thread -class Llm { +class LLamaAndroid { private val tag: String? = this::class.simpleName private val threadLocalState: ThreadLocal = ThreadLocal.withInitial { State.Idle } @@ -165,8 +165,8 @@ class Llm { } // Enforce only one instance of Llm. - private val _instance: Llm = Llm() + private val _instance: LLamaAndroid = LLamaAndroid() - fun instance(): Llm = _instance + fun instance(): LLamaAndroid = _instance } } diff --git a/examples/llama.android/llama/src/test/java/android/llama/cpp/ExampleUnitTest.kt b/examples/llama.android/llama/src/test/java/android/llama/cpp/ExampleUnitTest.kt new file mode 100644 index 000000000..cbbb974d3 --- /dev/null +++ b/examples/llama.android/llama/src/test/java/android/llama/cpp/ExampleUnitTest.kt @@ -0,0 +1,17 @@ +package android.llama.cpp + +import org.junit.Test + +import org.junit.Assert.* + +/** + * Example local unit test, which will execute on the development machine (host). + * + * See [testing documentation](http://d.android.com/tools/testing). 
+ */ +class ExampleUnitTest { + @Test + fun addition_isCorrect() { + assertEquals(4, 2 + 2) + } +} diff --git a/examples/llama.android/settings.gradle.kts b/examples/llama.android/settings.gradle.kts index 2ba32c4fa..c7c1a034a 100644 --- a/examples/llama.android/settings.gradle.kts +++ b/examples/llama.android/settings.gradle.kts @@ -15,3 +15,4 @@ dependencyResolutionManagement { rootProject.name = "LlamaAndroid" include(":app") +include(":llama") From 541600201e6480f54ae09e58d16b154d4b4b331d Mon Sep 17 00:00:00 2001 From: slaren Date: Tue, 14 May 2024 09:33:42 +0200 Subject: [PATCH 12/51] llama : disable pipeline parallelism with nkvo (#7265) --- llama.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/llama.cpp b/llama.cpp index 01a35dfb6..ad35e4a2e 100644 --- a/llama.cpp +++ b/llama.cpp @@ -15849,7 +15849,11 @@ struct llama_context * llama_new_context_with_model( ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead_custom(LLAMA_MAX_NODES, false)); // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary - bool pipeline_parallel = llama_get_device_count() > 1 && model->n_gpu_layers > (int)model->hparams.n_layer && model->split_mode == LLAMA_SPLIT_MODE_LAYER; + bool pipeline_parallel = + llama_get_device_count() > 1 && + model->n_gpu_layers > (int)model->hparams.n_layer && + model->split_mode == LLAMA_SPLIT_MODE_LAYER && + params.offload_kqv; #ifndef GGML_USE_CUDA // pipeline parallelism requires support for async compute and events // currently this is only implemented in the CUDA backend From 5e31828d3e35c76ecfee665bc23771a4bec1d130 Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Tue, 14 May 2024 14:27:19 +0300 Subject: [PATCH 13/51] ggml : add RPC backend (#6829) * ggml : add RPC backend The RPC backend proxies all operations to a remote server which runs a regular backend (CPU, CUDA, Metal, etc). 
* set TCP_NODELAY * add CI workflows * Address review comments * fix warning * implement llama_max_devices() for RPC * Address review comments * Address review comments * wrap sockfd into a struct * implement get_alignment and get_max_size * add get_device_memory * fix warning * win32 support * add README * readme : trim trailing whitespace * Address review comments * win32 fix * Address review comments * fix compile warnings on macos --- .github/workflows/build.yml | 32 ++ CMakeLists.txt | 13 + common/common.cpp | 10 + common/common.h | 1 + examples/CMakeLists.txt | 3 + examples/rpc/CMakeLists.txt | 2 + examples/rpc/README.md | 74 +++ examples/rpc/rpc-server.cpp | 70 +++ ggml-rpc.cpp | 1023 +++++++++++++++++++++++++++++++++++ ggml-rpc.h | 24 + llama.cpp | 238 ++++---- llama.h | 3 + 12 files changed, 1395 insertions(+), 98 deletions(-) create mode 100644 examples/rpc/CMakeLists.txt create mode 100644 examples/rpc/README.md create mode 100644 examples/rpc/rpc-server.cpp create mode 100644 ggml-rpc.cpp create mode 100644 ggml-rpc.h diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 8c338534d..7ac0e5f6e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -340,6 +340,36 @@ jobs: cd build ctest -L main --verbose + ubuntu-latest-cmake-rpc: + runs-on: ubuntu-latest + + continue-on-error: true + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v4 + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt-get install build-essential + + - name: Build + id: cmake_build + run: | + mkdir build + cd build + cmake -DLLAMA_RPC=ON .. + cmake --build . --config Release -j $(nproc) + + - name: Test + id: cmake_test + run: | + cd build + ctest -L main --verbose + ubuntu-22-cmake-vulkan: runs-on: ubuntu-22.04 @@ -663,6 +693,8 @@ jobs: strategy: matrix: include: + - build: 'rpc' + defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_RPC=ON -DBUILD_SHARED_LIBS=ON' - build: 'noavx' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DBUILD_SHARED_LIBS=ON' - build: 'avx2' diff --git a/CMakeLists.txt b/CMakeLists.txt index 1c3b5c8e4..feb6f39d0 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -123,6 +123,7 @@ set(LLAMA_METAL_MACOSX_VERSION_MIN "" CACHE STRING set(LLAMA_METAL_STD "" CACHE STRING "llama: metal standard version (-std flag)") option(LLAMA_KOMPUTE "llama: use Kompute" OFF) option(LLAMA_MPI "llama: use MPI" OFF) +option(LLAMA_RPC "llama: use RPC" OFF) option(LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF) option(LLAMA_SYCL "llama: use SYCL" OFF) option(LLAMA_SYCL_F16 "llama: use 16 bit floats for sycl calculations" OFF) @@ -494,6 +495,17 @@ if (LLAMA_MPI) endif() endif() +if (LLAMA_RPC) + add_compile_definitions(GGML_USE_RPC) + + if (WIN32) + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ws2_32) + endif() + + set(GGML_HEADERS_RPC ggml-rpc.h) + set(GGML_SOURCES_RPC ggml-rpc.cpp) +endif() + if (LLAMA_CLBLAST) find_package(CLBlast) if (CLBlast_FOUND) @@ -1176,6 +1188,7 @@ add_library(ggml OBJECT ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL} ${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL} ${GGML_SOURCES_MPI} ${GGML_HEADERS_MPI} + ${GGML_SOURCES_RPC} ${GGML_HEADERS_RPC} ${GGML_SOURCES_EXTRA} ${GGML_HEADERS_EXTRA} ${GGML_SOURCES_SYCL} ${GGML_HEADERS_SYCL} ${GGML_SOURCES_KOMPUTE} ${GGML_HEADERS_KOMPUTE} diff --git a/common/common.cpp b/common/common.cpp index ba1ecf0e5..96130ad54 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -1060,6 
+1060,14 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa #endif // GGML_USE_CUDA_SYCL_VULKAN return true; } + if (arg == "--rpc") { + if (++i >= argc) { + invalid_param = true; + return true; + } + params.rpc_servers = argv[i]; + return true; + } if (arg == "--no-mmap") { params.use_mmap = false; return true; @@ -1557,6 +1565,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { printf(" -mg i, --main-gpu i the GPU to use for the model (with split-mode = none),\n"); printf(" or for intermediate results and KV (with split-mode = row) (default: %d)\n", params.main_gpu); } + printf(" --rpc SERVERS comma separated list of RPC servers\n"); printf(" --verbose-prompt print a verbose prompt before generation (default: %s)\n", params.verbose_prompt ? "true" : "false"); printf(" --no-display-prompt don't print prompt at generation (default: %s)\n", !params.display_prompt ? "true" : "false"); printf(" -gan N, --grp-attn-n N\n"); @@ -1830,6 +1839,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & if (params.n_gpu_layers != -1) { mparams.n_gpu_layers = params.n_gpu_layers; } + mparams.rpc_servers = params.rpc_servers.c_str(); mparams.main_gpu = params.main_gpu; mparams.split_mode = params.split_mode; mparams.tensor_split = params.tensor_split; diff --git a/common/common.h b/common/common.h index d80344f2a..566490e2f 100644 --- a/common/common.h +++ b/common/common.h @@ -82,6 +82,7 @@ struct gpt_params { float yarn_beta_slow = 1.0f; // YaRN high correction dim int32_t yarn_orig_ctx = 0; // YaRN original context length float defrag_thold = -1.0f; // KV cache defragmentation threshold + std::string rpc_servers = ""; // comma separated list of RPC servers ggml_backend_sched_eval_callback cb_eval = nullptr; void * cb_eval_user_data = nullptr; diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt index f421769cc..b40ee4ccb 100644 --- a/examples/CMakeLists.txt +++ b/examples/CMakeLists.txt @@ -49,4 +49,7 @@ else() add_subdirectory(server) endif() add_subdirectory(export-lora) + if (LLAMA_RPC) + add_subdirectory(rpc) + endif() endif() diff --git a/examples/rpc/CMakeLists.txt b/examples/rpc/CMakeLists.txt new file mode 100644 index 000000000..ae48fb98d --- /dev/null +++ b/examples/rpc/CMakeLists.txt @@ -0,0 +1,2 @@ +add_executable(rpc-server rpc-server.cpp) +target_link_libraries(rpc-server PRIVATE ggml llama) diff --git a/examples/rpc/README.md b/examples/rpc/README.md new file mode 100644 index 000000000..325d0abc4 --- /dev/null +++ b/examples/rpc/README.md @@ -0,0 +1,74 @@ +## Overview + +The `rpc-server` allows running `ggml` backend on a remote host. +The RPC backend communicates with one or several instances of `rpc-server` and offloads computations to them. +This can be used for distributed LLM inference with `llama.cpp` in the following way: + +```mermaid +flowchart TD + rpcb---|TCP|srva + rpcb---|TCP|srvb + rpcb-.-|TCP|srvn + subgraph hostn[Host N] + srvn[rpc-server]-.-backend3["Backend (CUDA,Metal,etc.)"] + end + subgraph hostb[Host B] + srvb[rpc-server]---backend2["Backend (CUDA,Metal,etc.)"] + end + subgraph hosta[Host A] + srva[rpc-server]---backend["Backend (CUDA,Metal,etc.)"] + end + subgraph host[Main Host] + ggml[llama.cpp]---rpcb[RPC backend] + end + style hostn stroke:#66,stroke-width:2px,stroke-dasharray: 5 5 +``` + +Each host can run a different backend, e.g. one with CUDA and another with Metal. 
+You can also run multiple `rpc-server` instances on the same host, each with a different backend. + +## Usage + +On each host, build the corresponding backend with `cmake` and add `-DLLAMA_RPC=ON` to the build options. +For example, to build the CUDA backend with RPC support: + +```bash +mkdir build-rpc-cuda +cd build-rpc-cuda +cmake .. -DLLAMA_CUDA=ON -DLLAMA_RPC=ON +cmake --build . --config Release +``` + +Then, start the `rpc-server` with the backend: + +```bash +$ bin/rpc-server 0.0.0.0 50052 +create_backend: using CUDA backend +ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no +ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes +ggml_cuda_init: found 1 CUDA devices: + Device 0: NVIDIA T1200 Laptop GPU, compute capability 7.5, VMM: yes +Starting RPC server on 0.0.0.0:50052 +``` + +When using the CUDA backend, you can specify the device with the `CUDA_VISIBLE_DEVICES` environment variable, e.g.: +```bash +$ CUDA_VISIBLE_DEVICES=0 bin/rpc-server 0.0.0.0 50052 +``` +This way you can run multiple `rpc-server` instances on the same host, each with a different CUDA device. + + +On the main host build `llama.cpp` only with `-DLLAMA_RPC=ON`: + +```bash +mkdir build-rpc +cd build-rpc +cmake .. -DLLAMA_RPC=ON +cmake --build . --config Release +``` + +Finally, use the `--rpc` option to specify the host and port of each `rpc-server`: + +```bash +$ bin/main -m ../models/tinyllama-1b/ggml-model-f16.gguf -p "Hello, my name is" --repeat-penalty 1.0 -n 64 --rpc 192.168.88.10:50052,192.168.88.11:50052 -ngl 99 +``` diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp new file mode 100644 index 000000000..496af8496 --- /dev/null +++ b/examples/rpc/rpc-server.cpp @@ -0,0 +1,70 @@ +#ifdef GGML_USE_CUDA +#include "ggml-cuda.h" +#endif + +#ifdef GGML_USE_METAL +#include "ggml-metal.h" +#endif + +#include "ggml-rpc.h" +#include +#include + +static ggml_backend_t create_backend() { + ggml_backend_t backend = NULL; +#ifdef GGML_USE_CUDA + fprintf(stderr, "%s: using CUDA backend\n", __func__); + backend = ggml_backend_cuda_init(0); // init device 0 + if (!backend) { + fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__); + } +#elif GGML_USE_METAL + fprintf(stderr, "%s: using Metal backend\n", __func__); + backend = ggml_backend_metal_init(); + if (!backend) { + fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__); + } +#endif + + // if there aren't GPU Backends fallback to CPU backend + if (!backend) { + fprintf(stderr, "%s: using CPU backend\n", __func__); + backend = ggml_backend_cpu_init(); + } + return backend; +} + +static void get_backend_memory(size_t * free_mem, size_t * total_mem) { +#ifdef GGML_USE_CUDA + ggml_backend_cuda_get_device_memory(0, free_mem, total_mem); +#else + // TODO: implement for other backends + *free_mem = 1; + *total_mem = 1; +#endif +} + +int main(int argc, char * argv[]) { + if (argc < 3) { + fprintf(stderr, "Usage: %s \n", argv[0]); + return 1; + } + const char * host = argv[1]; + int port = std::stoi(argv[2]); + if (port <= 0 || port > 65535) { + fprintf(stderr, "Invalid port number: %d\n", port); + return 1; + } + ggml_backend_t backend = create_backend(); + if (!backend) { + fprintf(stderr, "Failed to create backend\n"); + return 1; + } + printf("Starting RPC server on %s:%d\n", host, port); + size_t free_mem, total_mem; + get_backend_memory(&free_mem, &total_mem); + std::string endpoint = std::string(host) + ":" + std::to_string(port); + start_rpc_server(backend, endpoint.c_str(), free_mem, total_mem); + ggml_backend_free(backend); + return 0; +} 
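
The `rpc-server` above is the remote half of the feature; on the main host the patch exposes `ggml_backend_rpc_init`, `ggml_backend_is_rpc` and `ggml_backend_rpc_get_device_memory` via `ggml-rpc.h`. A minimal host-side sketch, not part of the patch; the endpoint is a placeholder:

```cpp
// Host-side sketch: connect to a single rpc-server and query the memory it
// advertises. "192.168.88.10:50052" is a placeholder endpoint.
#include "ggml-backend.h"
#include "ggml-rpc.h"
#include <cstdio>

int main() {
    const char * endpoint = "192.168.88.10:50052";

    ggml_backend_t backend = ggml_backend_rpc_init(endpoint);
    if (backend == nullptr || !ggml_backend_is_rpc(backend)) {
        fprintf(stderr, "failed to connect to %s\n", endpoint);
        return 1;
    }

    size_t free_mem = 0, total_mem = 0;
    ggml_backend_rpc_get_device_memory(endpoint, &free_mem, &total_mem);
    printf("%s: free=%zu total=%zu bytes\n", endpoint, free_mem, total_mem);

    ggml_backend_free(backend);
    return 0;
}
```
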
diff --git a/ggml-rpc.cpp b/ggml-rpc.cpp new file mode 100644 index 000000000..efeacb297 --- /dev/null +++ b/ggml-rpc.cpp @@ -0,0 +1,1023 @@ +#include "ggml-rpc.h" +#include "ggml.h" +#include "ggml-backend-impl.h" + +#include +#include +#include +#include +#include +#include +#ifdef _WIN32 +# define WIN32_LEAN_AND_MEAN +# ifndef NOMINMAX +# define NOMINMAX +# endif +# include +# include +#else +# include +# include +# include +# include +# include +# include +# include +#endif +#include + +#define UNUSED GGML_UNUSED + +#define GGML_DEBUG 1 +#if (GGML_DEBUG >= 1) +#define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__) +#else +#define GGML_PRINT_DEBUG(...) +#endif + +#ifdef _WIN32 +typedef SOCKET sockfd_t; +using ssize_t = __int64; +#else +typedef int sockfd_t; +#endif + +// cross-platform socket +struct socket_t { + sockfd_t fd; + socket_t(sockfd_t fd) : fd(fd) {} + ~socket_t() { +#ifdef _WIN32 + closesocket(this->fd); +#else + close(this->fd); +#endif + } +}; + +// ggml_tensor is serialized into rpc_tensor +struct rpc_tensor { + uint64_t id; + uint32_t type; + uint64_t buffer; + uint32_t ne[GGML_MAX_DIMS]; + uint32_t nb[GGML_MAX_DIMS]; + uint32_t op; + int32_t op_params[GGML_MAX_OP_PARAMS / sizeof(int32_t)]; + int32_t flags; + uint64_t src[GGML_MAX_SRC]; + uint64_t view_src; + uint64_t view_offs; + uint64_t data; + char name[GGML_MAX_NAME]; +}; + +// RPC commands +enum rpc_cmd { + ALLOC_BUFFER = 0, + GET_ALIGNMENT, + GET_MAX_SIZE, + BUFFER_GET_BASE, + FREE_BUFFER, + BUFFER_CLEAR, + SET_TENSOR, + GET_TENSOR, + COPY_TENSOR, + GRAPH_COMPUTE, + GET_DEVICE_MEMORY, +}; + +// RPC data structures + +static ggml_guid_t ggml_backend_rpc_guid() { + static ggml_guid guid = {0x99, 0x68, 0x5b, 0x6c, 0xd2, 0x83, 0x3d, 0x24, 0x25, 0x36, 0x72, 0xe1, 0x5b, 0x0e, 0x14, 0x03}; + return &guid; +} + +struct ggml_backend_rpc_buffer_type_context { + std::shared_ptr sock; + std::string name; + size_t alignment; + size_t max_size; +}; + +struct ggml_backend_rpc_context { + std::string endpoint; + std::string name; + std::shared_ptr sock; + ggml_backend_buffer_type_t buft; +}; + +struct ggml_backend_rpc_buffer_context { + std::shared_ptr sock; + std::unordered_map base_cache; + uint64_t remote_ptr; + std::string name; +}; + +// RPC helper functions + +static std::shared_ptr make_socket(sockfd_t fd) { +#ifdef _WIN32 + if (fd == INVALID_SOCKET) { + return nullptr; + } +#else + if (fd < 0) { + return nullptr; + } +#endif + return std::make_shared(fd); +} + +static bool set_no_delay(sockfd_t sockfd) { + int flag = 1; + // set TCP_NODELAY to disable Nagle's algorithm + int ret = setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, (char *)&flag, sizeof(int)); + return ret >= 0; +} + +static std::shared_ptr socket_connect(const char * host, int port) { + struct sockaddr_in addr; + auto sockfd = socket(AF_INET, SOCK_STREAM, 0); + auto sock_ptr = make_socket(sockfd); + if (sock_ptr == nullptr) { + return nullptr; + } + if (!set_no_delay(sockfd)) { + fprintf(stderr, "Failed to set TCP_NODELAY\n"); + return nullptr; + } + addr.sin_family = AF_INET; + addr.sin_port = htons(port); + struct hostent * server = gethostbyname(host); + if (server == NULL) { + fprintf(stderr, "Cannot resolve host '%s'\n", host); + return nullptr; + } + memcpy(&addr.sin_addr.s_addr, server->h_addr, server->h_length); + if (connect(sock_ptr->fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) { + return nullptr; + } + return sock_ptr; +} + +static std::shared_ptr socket_accept(sockfd_t srv_sockfd) { + auto client_socket_fd = accept(srv_sockfd, NULL, NULL); + auto 
client_socket = make_socket(client_socket_fd); + if (client_socket == nullptr) { + return nullptr; + } + if (!set_no_delay(client_socket_fd)) { + fprintf(stderr, "Failed to set TCP_NODELAY\n"); + return nullptr; + } + return client_socket; +} + +static std::shared_ptr create_server_socket(const char * host, int port) { + auto sockfd = socket(AF_INET, SOCK_STREAM, 0); + auto sock = make_socket(sockfd); + if (sock == nullptr) { + return nullptr; + } + + struct sockaddr_in serv_addr; + serv_addr.sin_family = AF_INET; + serv_addr.sin_addr.s_addr = inet_addr(host); + serv_addr.sin_port = htons(port); + + if (bind(sockfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) { + return nullptr; + } + if (listen(sockfd, 1) < 0) { + return nullptr; + } + return sock; +} + +static bool send_data(sockfd_t sockfd, const void * data, size_t size) { + size_t bytes_sent = 0; + while (bytes_sent < size) { + ssize_t n = send(sockfd, (const char *)data + bytes_sent, size - bytes_sent, 0); + if (n < 0) { + return false; + } + bytes_sent += n; + } + return true; +} + +static bool recv_data(sockfd_t sockfd, void * data, size_t size) { + size_t bytes_recv = 0; + while (bytes_recv < size) { + ssize_t n = recv(sockfd, (char *)data + bytes_recv, size - bytes_recv, 0); + if (n <= 0) { + return false; + } + bytes_recv += n; + } + return true; +} + +static bool parse_endpoint(const char * endpoint, std::string & host, int & port) { + std::string str(endpoint); + size_t pos = str.find(':'); + if (pos == std::string::npos) { + return false; + } + host = str.substr(0, pos); + port = std::stoi(str.substr(pos + 1)); + return true; +} + +// RPC request : | rpc_cmd (1 byte) | request_size (8 bytes) | request_data (request_size bytes) | +// RPC response: | response_size (8 bytes) | response_data (response_size bytes) | +static bool send_rpc_cmd(const std::shared_ptr & sock, enum rpc_cmd cmd, const std::vector & input, std::vector & output) { + uint8_t cmd_byte = cmd; + if (!send_data(sock->fd, &cmd_byte, sizeof(cmd_byte))) { + return false; + } + uint64_t input_size = input.size(); + if (!send_data(sock->fd, &input_size, sizeof(input_size))) { + return false; + } + if (!send_data(sock->fd, input.data(), input.size())) { + return false; + } + uint64_t output_size; + if (!recv_data(sock->fd, &output_size, sizeof(output_size))) { + return false; + } + if (output_size == 0) { + output.clear(); + return true; + } + output.resize(output_size); + if (!recv_data(sock->fd, output.data(), output_size)) { + return false; + } + return true; +} + +// RPC client-side implementation + +GGML_CALL static const char * ggml_backend_rpc_buffer_get_name(ggml_backend_buffer_t buffer) { + ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; + return ctx->name.c_str(); +} + +GGML_CALL static void ggml_backend_rpc_buffer_free_buffer(ggml_backend_buffer_t buffer) { + ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; + // input serialization format: | remote_ptr (8 bytes) | + std::vector input(sizeof(uint64_t), 0); + uint64_t remote_ptr = ctx->remote_ptr; + memcpy(input.data(), &remote_ptr, sizeof(remote_ptr)); + std::vector output; + bool status = send_rpc_cmd(ctx->sock, FREE_BUFFER, input, output); + GGML_ASSERT(status); + GGML_ASSERT(output.empty()); + delete ctx; +} + +GGML_CALL static void * ggml_backend_rpc_buffer_get_base(ggml_backend_buffer_t buffer) { + ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; + if 
(ctx->base_cache.find(buffer) != ctx->base_cache.end()) { + return ctx->base_cache[buffer]; + } + // input serialization format: | remote_ptr (8 bytes) | + std::vector input(sizeof(uint64_t), 0); + uint64_t remote_ptr = ctx->remote_ptr; + memcpy(input.data(), &remote_ptr, sizeof(remote_ptr)); + std::vector output; + bool status = send_rpc_cmd(ctx->sock, BUFFER_GET_BASE, input, output); + GGML_ASSERT(status); + GGML_ASSERT(output.size() == sizeof(uint64_t)); + // output serialization format: | base_ptr (8 bytes) | + uint64_t base_ptr; + memcpy(&base_ptr, output.data(), sizeof(base_ptr)); + void * base = reinterpret_cast(base_ptr); + ctx->base_cache[buffer] = base; + return base; +} + +static rpc_tensor serialize_tensor(const ggml_tensor * tensor) { + rpc_tensor result; + result.id = reinterpret_cast(tensor); + result.type = tensor->type; + if (tensor->buffer) { + ggml_backend_buffer_t buffer = tensor->buffer; + ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; + result.buffer = ctx->remote_ptr; + } else { + result.buffer = 0; + } + for (uint32_t i = 0; i < GGML_MAX_DIMS; i++) { + result.ne[i] = tensor->ne[i]; + result.nb[i] = tensor->nb[i]; + } + result.op = tensor->op; + for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) { + result.op_params[i] = tensor->op_params[i]; + } + result.flags = tensor->flags; + for (uint32_t i = 0; i < GGML_MAX_SRC; i++) { + result.src[i] = reinterpret_cast(tensor->src[i]); + } + result.view_src = reinterpret_cast(tensor->view_src); + result.view_offs = tensor->view_offs; + result.data = reinterpret_cast(tensor->data); + snprintf(result.name, GGML_MAX_NAME, "%s", tensor->name); + return result; +} + +static ggml_tensor * deserialize_tensor(struct ggml_context * ctx, const rpc_tensor * tensor) { + ggml_tensor * result = ggml_new_tensor_4d(ctx, (ggml_type) tensor->type, + tensor->ne[0], tensor->ne[1], tensor->ne[2], tensor->ne[3]); + for (uint32_t i = 0; i < GGML_MAX_DIMS; i++) { + result->nb[i] = tensor->nb[i]; + } + result->buffer = reinterpret_cast(tensor->buffer); + result->op = (ggml_op) tensor->op; + for (uint32_t i = 0; i < GGML_MAX_OP_PARAMS / sizeof(int32_t); i++) { + result->op_params[i] = tensor->op_params[i]; + } + result->flags = tensor->flags; + result->data = reinterpret_cast(tensor->data); + ggml_set_name(result, tensor->name); + return result; +} + +GGML_CALL static void ggml_backend_rpc_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) { + UNUSED(buffer); + if (ggml_is_quantized(tensor->type)) { + // TODO: this check is due to MATRIX_ROW_PADDING in CUDA and should be generalized + GGML_ASSERT(tensor->ne[0] % 512 == 0 && "unsupported quantized tensor"); + } +} + +GGML_CALL static void ggml_backend_rpc_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) { + ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; + // input serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) | + size_t input_size = sizeof(rpc_tensor) + sizeof(uint64_t) + size; + std::vector input(input_size, 0); + rpc_tensor rpc_tensor = serialize_tensor(tensor); + memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor)); + memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); + memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), data, size); + std::vector output; + bool status = send_rpc_cmd(ctx->sock, SET_TENSOR, input, output); + GGML_ASSERT(status); +} + 
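
The buffer hooks in this file are normally reached indirectly through the generic ggml-backend API rather than called by hand. A sketch of that path, not part of the patch and using a placeholder endpoint: allocating a tensor in an RPC buffer and writing to it funnels into `ggml_backend_rpc_buffer_set_tensor` above, i.e. a `SET_TENSOR` command on the wire.

```cpp
// Sketch: populate a tensor whose storage lives on a remote rpc-server.
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
#include "ggml-rpc.h"
#include <vector>

static void rpc_set_tensor_sketch() {
    struct ggml_init_params params = {
        /*.mem_size   =*/ ggml_tensor_overhead(),
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ true, // tensor data lives in the remote buffer
    };
    struct ggml_context * ctx = ggml_init(params);
    struct ggml_tensor * t = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 16);

    // buffer type backed by a remote rpc-server (placeholder endpoint)
    ggml_backend_buffer_type_t buft = ggml_backend_rpc_buffer_type("192.168.88.10:50052");
    if (buft == nullptr) { ggml_free(ctx); return; }

    ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx, buft);
    if (buf == nullptr) { ggml_free(ctx); return; }

    std::vector<float> data(16, 1.0f);
    // routed to ggml_backend_rpc_buffer_set_tensor -> SET_TENSOR on the server
    ggml_backend_tensor_set(t, data.data(), 0, ggml_nbytes(t));

    ggml_backend_buffer_free(buf);
    ggml_free(ctx);
}
```
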
+GGML_CALL static void ggml_backend_rpc_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) { + ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; + // input serialization format: | rpc_tensor | offset (8 bytes) | size (8 bytes) | + int input_size = sizeof(rpc_tensor) + 2*sizeof(uint64_t); + std::vector input(input_size, 0); + rpc_tensor rpc_tensor = serialize_tensor(tensor); + memcpy(input.data(), &rpc_tensor, sizeof(rpc_tensor)); + memcpy(input.data() + sizeof(rpc_tensor), &offset, sizeof(offset)); + memcpy(input.data() + sizeof(rpc_tensor) + sizeof(offset), &size, sizeof(size)); + std::vector output; + bool status = send_rpc_cmd(ctx->sock, GET_TENSOR, input, output); + GGML_ASSERT(status); + GGML_ASSERT(output.size() == size); + // output serialization format: | data (size bytes) | + memcpy(data, output.data(), size); +} + +GGML_CALL static bool ggml_backend_rpc_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) { + // check if src and dst are on the same server + ggml_backend_buffer_t src_buffer = src->buffer; + ggml_backend_rpc_buffer_context * src_ctx = (ggml_backend_rpc_buffer_context *)src_buffer->context; + ggml_backend_buffer_t dst_buffer = dst->buffer; + ggml_backend_rpc_buffer_context * dst_ctx = (ggml_backend_rpc_buffer_context *)dst_buffer->context; + if (src_ctx->sock != dst_ctx->sock) { + return false; + } + ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; + // input serialization format: | rpc_tensor src | rpc_tensor dst | + int input_size = 2*sizeof(rpc_tensor); + std::vector input(input_size, 0); + rpc_tensor rpc_src = serialize_tensor(src); + rpc_tensor rpc_dst = serialize_tensor(dst); + memcpy(input.data(), &rpc_src, sizeof(rpc_src)); + memcpy(input.data() + sizeof(rpc_src), &rpc_dst, sizeof(rpc_dst)); + std::vector output; + bool status = send_rpc_cmd(ctx->sock, COPY_TENSOR, input, output); + GGML_ASSERT(status); + // output serialization format: | result (1 byte) | + GGML_ASSERT(output.size() == 1); + return output[0]; +} + +GGML_CALL static void ggml_backend_rpc_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) { + ggml_backend_rpc_buffer_context * ctx = (ggml_backend_rpc_buffer_context *)buffer->context; + // serialization format: | bufptr (8 bytes) | value (1 byte) | + int input_size = sizeof(uint64_t) + sizeof(uint8_t); + std::vector input(input_size, 0); + memcpy(input.data(), &ctx->remote_ptr, sizeof(ctx->remote_ptr)); + memcpy(input.data() + sizeof(ctx->remote_ptr), &value, sizeof(value)); + std::vector output; + bool status = send_rpc_cmd(ctx->sock, BUFFER_CLEAR, input, output); + GGML_ASSERT(status); +} + +static ggml_backend_buffer_i ggml_backend_rpc_buffer_interface = { + /* .get_name = */ ggml_backend_rpc_buffer_get_name, + /* .free_buffer = */ ggml_backend_rpc_buffer_free_buffer, + /* .get_base = */ ggml_backend_rpc_buffer_get_base, + /* .init_tensor = */ ggml_backend_rpc_buffer_init_tensor, + /* .set_tensor = */ ggml_backend_rpc_buffer_set_tensor, + /* .get_tensor = */ ggml_backend_rpc_buffer_get_tensor, + /* .cpy_tensor = */ ggml_backend_rpc_buffer_cpy_tensor, + /* .clear = */ ggml_backend_rpc_buffer_clear, + /* .reset = */ NULL, +}; + +GGML_CALL static const char * ggml_backend_rpc_buffer_type_name(ggml_backend_buffer_type_t buft) { + ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; + return 
buft_ctx->name.c_str(); +} + +GGML_CALL static ggml_backend_buffer_t ggml_backend_rpc_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) { + ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; + // input serialization format: | size (8 bytes) | + int input_size = sizeof(uint64_t); + std::vector input(input_size, 0); + memcpy(input.data(), &size, sizeof(size)); + std::vector output; + bool status = send_rpc_cmd(buft_ctx->sock, ALLOC_BUFFER, input, output); + GGML_ASSERT(status); + GGML_ASSERT(output.size() == 2*sizeof(uint64_t)); + // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) | + uint64_t remote_ptr; + memcpy(&remote_ptr, output.data(), sizeof(remote_ptr)); + size_t remote_size; + memcpy(&remote_size, output.data() + sizeof(uint64_t), sizeof(remote_size)); + + ggml_backend_buffer_t buffer = ggml_backend_buffer_init(buft, + ggml_backend_rpc_buffer_interface, + new ggml_backend_rpc_buffer_context{buft_ctx->sock, {}, remote_ptr, "RPC"}, + remote_size); + + return buffer; +} + +static size_t get_alignment(const std::shared_ptr & sock) { + // input serialization format: | 0 bytes | + std::vector input; + std::vector output; + bool status = send_rpc_cmd(sock, GET_ALIGNMENT, input, output); + GGML_ASSERT(status); + GGML_ASSERT(output.size() == sizeof(uint64_t)); + // output serialization format: | alignment (8 bytes) | + uint64_t alignment; + memcpy(&alignment, output.data(), sizeof(alignment)); + return alignment; +} + +GGML_CALL static size_t ggml_backend_rpc_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) { + ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; + return buft_ctx->alignment; +} + +static size_t get_max_size(const std::shared_ptr & sock) { + // input serialization format: | 0 bytes | + std::vector input; + std::vector output; + bool status = send_rpc_cmd(sock, GET_MAX_SIZE, input, output); + GGML_ASSERT(status); + GGML_ASSERT(output.size() == sizeof(uint64_t)); + // output serialization format: | max_size (8 bytes) | + uint64_t max_size; + memcpy(&max_size, output.data(), sizeof(max_size)); + return max_size; +} + +GGML_CALL static size_t ggml_backend_rpc_get_max_size(ggml_backend_buffer_type_t buft) { + ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; + return buft_ctx->max_size; +} + +GGML_CALL static size_t ggml_backend_rpc_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) { + UNUSED(buft); + return ggml_nbytes(tensor); +} + +GGML_CALL static bool ggml_backend_rpc_buffer_type_supports_backend(ggml_backend_buffer_type_t buft, ggml_backend_t backend) { + if (!ggml_backend_is_rpc(backend)) { + return false; + } + ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)buft->context; + ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context; + return buft_ctx->sock == rpc_ctx->sock; +} + +static ggml_backend_buffer_type_i ggml_backend_rpc_buffer_type_interface = { + /* .get_name = */ ggml_backend_rpc_buffer_type_name, + /* .alloc_buffer = */ ggml_backend_rpc_buffer_type_alloc_buffer, + /* .get_alignment = */ ggml_backend_rpc_buffer_type_get_alignment, + /* .get_max_size = */ ggml_backend_rpc_get_max_size, + /* .get_alloc_size = */ ggml_backend_rpc_buffer_type_get_alloc_size, + /* .supports_backend = */ ggml_backend_rpc_buffer_type_supports_backend, + /* .is_host 
= */ NULL, +}; + + +GGML_CALL static const char * ggml_backend_rpc_name(ggml_backend_t backend) { + ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context; + + return rpc_ctx->name.c_str(); +} + +GGML_CALL static void ggml_backend_rpc_free(ggml_backend_t backend) { + ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context; + ggml_backend_rpc_buffer_type_context * buft_ctx = (ggml_backend_rpc_buffer_type_context *)rpc_ctx->buft->context; + delete buft_ctx; + delete rpc_ctx->buft; + delete rpc_ctx; + delete backend; +} + +GGML_CALL static ggml_backend_buffer_type_t ggml_backend_rpc_get_default_buffer_type(ggml_backend_t backend) { + ggml_backend_rpc_context * ctx = (ggml_backend_rpc_context *)backend->context; + return ctx->buft; +} + +GGML_CALL static void ggml_backend_rpc_synchronize(ggml_backend_t backend) { + UNUSED(backend); + // this is no-op because we don't have any async operations +} + +static void add_tensor(ggml_tensor * tensor, std::vector & tensors, std::unordered_set & visited) { + if (tensor == nullptr) { + return; + } + if (visited.find(tensor) != visited.end()) { + return; + } + visited.insert(tensor); + for (int i = 0; i < GGML_MAX_SRC; i++) { + add_tensor(tensor->src[i], tensors, visited); + } + add_tensor(tensor->view_src, tensors, visited); + tensors.push_back(serialize_tensor(tensor)); +} + +static void serialize_graph(const ggml_cgraph * cgraph, std::vector & output) { + uint32_t n_nodes = cgraph->n_nodes; + std::vector tensors; + std::unordered_set visited; + for (uint32_t i = 0; i < n_nodes; i++) { + add_tensor(cgraph->nodes[i], tensors, visited); + } + // serialization format: + // | n_nodes (4 bytes) | nodes (n_nodes * sizeof(uint64_t) | n_tensors (4 bytes) | tensors (n_tensors * sizeof(rpc_tensor)) | + uint32_t n_tensors = tensors.size(); + int output_size = sizeof(uint32_t) + n_nodes * sizeof(uint64_t) + sizeof(uint32_t) + n_tensors * sizeof(rpc_tensor); + output.resize(output_size, 0); + memcpy(output.data(), &n_nodes, sizeof(n_nodes)); + uint64_t * out_nodes = (uint64_t *)(output.data() + sizeof(n_nodes)); + for (uint32_t i = 0; i < n_nodes; i++) { + out_nodes[i] = reinterpret_cast(cgraph->nodes[i]); + } + uint32_t * out_ntensors = (uint32_t *)(output.data() + sizeof(n_nodes) + n_nodes * sizeof(uint64_t)); + *out_ntensors = n_tensors; + rpc_tensor * out_tensors = (rpc_tensor *)(output.data() + sizeof(n_nodes) + n_nodes * sizeof(uint64_t) + sizeof(uint32_t)); + memcpy(out_tensors, tensors.data(), n_tensors * sizeof(rpc_tensor)); +} + +GGML_CALL static enum ggml_status ggml_backend_rpc_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) { + ggml_backend_rpc_context * rpc_ctx = (ggml_backend_rpc_context *)backend->context; + std::vector input; + serialize_graph(cgraph, input); + std::vector output; + bool status = send_rpc_cmd(rpc_ctx->sock, GRAPH_COMPUTE, input, output); + GGML_ASSERT(status); + GGML_ASSERT(output.size() == 1); + return (enum ggml_status)output[0]; +} + +GGML_CALL static bool ggml_backend_rpc_supports_op(ggml_backend_t backend, const ggml_tensor * op) { + UNUSED(backend); + UNUSED(op); + GGML_ASSERT(false && "not implemented"); + return false; +} + +static ggml_backend_i ggml_backend_rpc_interface = { + /* .get_name = */ ggml_backend_rpc_name, + /* .free = */ ggml_backend_rpc_free, + /* .get_default_buffer_type = */ ggml_backend_rpc_get_default_buffer_type, + /* .set_tensor_async = */ NULL, + /* .get_tensor_async = */ NULL, + /* .cpy_tensor_async = */ NULL, + /* .synchronize = */ 
ggml_backend_rpc_synchronize, + /* .graph_plan_create = */ NULL, + /* .graph_plan_free = */ NULL, + /* .graph_plan_compute = */ NULL, + /* .graph_compute = */ ggml_backend_rpc_graph_compute, + /* .supports_op = */ ggml_backend_rpc_supports_op, + /* .offload_op = */ NULL, + /* .event_new = */ NULL, + /* .event_free = */ NULL, + /* .event_record = */ NULL, + /* .event_wait = */ NULL, + /* .event_synchronize = */ NULL, +}; + +static std::unordered_map instances; + +GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint) { + ggml_backend_t backend = ggml_backend_rpc_init(endpoint); + return backend != nullptr ? ggml_backend_rpc_get_default_buffer_type(backend) : nullptr; +} + +GGML_CALL ggml_backend_t ggml_backend_rpc_init(const char * endpoint) { + std::string endpoint_str(endpoint); + if (instances.find(endpoint_str) != instances.end()) { + return instances[endpoint_str]; + } +#ifdef _WIN32 + { + WSADATA wsaData; + int res = WSAStartup(MAKEWORD(2, 2), &wsaData); + if (res != 0) { + return nullptr; + } + } +#endif + GGML_PRINT_DEBUG("Connecting to %s\n", endpoint); + std::string host; + int port; + if (!parse_endpoint(endpoint, host, port)) { + return nullptr; + } + auto sock = socket_connect(host.c_str(), port); + if (sock == nullptr) { + return nullptr; + } + size_t alignment = get_alignment(sock); + size_t max_size = get_max_size(sock); + ggml_backend_rpc_buffer_type_context * buft_ctx = new ggml_backend_rpc_buffer_type_context { + /* .sock = */ sock, + /* .name = */ "RPC" + std::to_string(sock->fd), + /* .alignment = */ alignment, + /* .max_size = */ max_size + }; + + ggml_backend_buffer_type_t buft = new ggml_backend_buffer_type { + /* .iface = */ ggml_backend_rpc_buffer_type_interface, + /* .context = */ buft_ctx + }; + + ggml_backend_rpc_context * ctx = new ggml_backend_rpc_context { + /* .endpoint = */ endpoint, + /* .name = */ "RPC" + std::to_string(sock->fd), + /* .sock = */ sock, + /* .buft = */ buft + }; + + instances[endpoint] = new ggml_backend { + /* .guid = */ ggml_backend_rpc_guid(), + /* .interface = */ ggml_backend_rpc_interface, + /* .context = */ ctx + }; + + return instances[endpoint]; +} + +GGML_API GGML_CALL bool ggml_backend_is_rpc(ggml_backend_t backend) { + return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_rpc_guid()); +} + +static void get_device_memory(const std::shared_ptr & sock, size_t * free, size_t * total) { + // input serialization format: | 0 bytes | + std::vector input; + std::vector output; + bool status = send_rpc_cmd(sock, GET_DEVICE_MEMORY, input, output); + GGML_ASSERT(status); + GGML_ASSERT(output.size() == 2*sizeof(uint64_t)); + // output serialization format: | free (8 bytes) | total (8 bytes) | + uint64_t free_mem; + memcpy(&free_mem, output.data(), sizeof(free_mem)); + uint64_t total_mem; + memcpy(&total_mem, output.data() + sizeof(uint64_t), sizeof(total_mem)); + *free = free_mem; + *total = total_mem; +} + +GGML_API GGML_CALL void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total) { + ggml_backend_t backend = ggml_backend_rpc_init(endpoint); + if (backend == nullptr) { + *free = 0; + *total = 0; + return; + } + ggml_backend_rpc_context * ctx = (ggml_backend_rpc_context *)backend->context; + get_device_memory(ctx->sock, free, total); +} + +// RPC server-side implementation + +static void rpc_alloc_buffer(ggml_backend_t backend, const std::vector & input, std::vector & output) { + // input serialization format: | size (8 bytes) | + uint64_t 
size; + memcpy(&size, input.data(), sizeof(size)); + ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend); + ggml_backend_buffer_t buffer = ggml_backend_buft_alloc_buffer(buft, size); + uint64_t remote_ptr = reinterpret_cast(buffer); + uint64_t remote_size = buffer->size; + GGML_PRINT_DEBUG("[%s] size: %" PRIu64 " -> remote_ptr: %" PRIx64 ", remote_size: %" PRIu64 "\n", __func__, size, remote_ptr, remote_size); + // output serialization format: | remote_ptr (8 bytes) | remote_size (8 bytes) | + output.resize(2*sizeof(uint64_t), 0); + memcpy(output.data(), &remote_ptr, sizeof(remote_ptr)); + memcpy(output.data() + sizeof(uint64_t), &remote_size, sizeof(remote_size)); +} + +static void rpc_get_alignment(ggml_backend_t backend, std::vector & output) { + ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend); + size_t alignment = ggml_backend_buft_get_alignment(buft); + GGML_PRINT_DEBUG("[%s] alignment: %lu\n", __func__, alignment); + // output serialization format: | alignment (8 bytes) | + output.resize(sizeof(uint64_t), 0); + memcpy(output.data(), &alignment, sizeof(alignment)); +} + +static void rpc_get_max_size(ggml_backend_t backend, std::vector & output) { + ggml_backend_buffer_type_t buft = ggml_backend_get_default_buffer_type(backend); + size_t max_size = ggml_backend_buft_get_max_size(buft); + GGML_PRINT_DEBUG("[%s] max_size: %lu\n", __func__, max_size); + // output serialization format: | max_size (8 bytes) | + output.resize(sizeof(uint64_t), 0); + memcpy(output.data(), &max_size, sizeof(max_size)); +} + +static void rpc_buffer_get_base(const std::vector & input, std::vector & output) { + // input serialization format: | remote_ptr (8 bytes) | + uint64_t remote_ptr; + memcpy(&remote_ptr, input.data(), sizeof(remote_ptr)); + GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr); + ggml_backend_buffer_t buffer = reinterpret_cast(remote_ptr); + void * base = ggml_backend_buffer_get_base(buffer); + // output serialization format: | base_ptr (8 bytes) | + uint64_t base_ptr = reinterpret_cast(base); + output.resize(sizeof(uint64_t), 0); + memcpy(output.data(), &base_ptr, sizeof(base_ptr)); +} + +static void rpc_free_buffer(const std::vector & input) { + // input serialization format: | remote_ptr (8 bytes) | + uint64_t remote_ptr; + memcpy(&remote_ptr, input.data(), sizeof(remote_ptr)); + GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 "\n", __func__, remote_ptr); + ggml_backend_buffer_t buffer = reinterpret_cast(remote_ptr); + ggml_backend_buffer_free(buffer); +} + +static void rpc_buffer_clear(const std::vector & input) { + // input serialization format: | remote_ptr (8 bytes) | value (1 byte) | + uint64_t remote_ptr; + memcpy(&remote_ptr, input.data(), sizeof(remote_ptr)); + uint8_t value; + memcpy(&value, input.data() + sizeof(uint64_t), sizeof(value)); + GGML_PRINT_DEBUG("[%s] remote_ptr: %" PRIx64 ", value: %u\n", __func__, remote_ptr, value); + ggml_backend_buffer_t buffer = reinterpret_cast(remote_ptr); + ggml_backend_buffer_clear(buffer, value); +} + +static void rpc_set_tensor(const std::vector & input) { + // serialization format: | rpc_tensor | offset (8 bytes) | data (size bytes) | + const rpc_tensor * in_tensor = (const rpc_tensor *)input.data(); + uint64_t offset; + memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset)); + size_t size = input.size() - sizeof(rpc_tensor) - sizeof(offset); + + struct ggml_init_params params { + /*.mem_size =*/ ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + 
/*.no_alloc =*/ true, + }; + struct ggml_context * ctx = ggml_init(params); + ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor); + GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %zu\n", __func__, (void*)tensor->buffer, tensor->data, offset, size); + const void * data = input.data() + sizeof(rpc_tensor) + sizeof(offset); + ggml_backend_tensor_set(tensor, data, offset, size); + ggml_free(ctx); +} + +static void rpc_get_tensor(const std::vector & input, std::vector & output) { + // serialization format: | rpc_tensor | offset (8 bytes) | size (8 bytes) | + const rpc_tensor * in_tensor = (const rpc_tensor *)input.data(); + uint64_t offset; + memcpy(&offset, input.data() + sizeof(rpc_tensor), sizeof(offset)); + uint64_t size; + memcpy(&size, input.data() + sizeof(rpc_tensor) + sizeof(offset), sizeof(size)); + + struct ggml_init_params params { + /*.mem_size =*/ ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + struct ggml_context * ctx = ggml_init(params); + ggml_tensor * tensor = deserialize_tensor(ctx, in_tensor); + GGML_PRINT_DEBUG("[%s] buffer: %p, data: %p, offset: %" PRIu64 ", size: %" PRIu64 "\n", __func__, (void*)tensor->buffer, tensor->data, offset, size); + // output serialization format: | data (size bytes) | + output.resize(size, 0); + ggml_backend_tensor_get(tensor, output.data(), offset, size); + ggml_free(ctx); +} + +static void rpc_copy_tensor(const std::vector & input, std::vector & output) { + // serialization format: | rpc_tensor src | rpc_tensor dst | + const rpc_tensor * rpc_src = (const rpc_tensor *)input.data(); + const rpc_tensor * rpc_dst = (const rpc_tensor *)(input.data() + sizeof(rpc_src)); + + struct ggml_init_params params { + /*.mem_size =*/ 2*ggml_tensor_overhead(), + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + struct ggml_context * ctx = ggml_init(params); + ggml_tensor * src = deserialize_tensor(ctx, rpc_src); + ggml_tensor * dst = deserialize_tensor(ctx, rpc_dst); + GGML_PRINT_DEBUG("[%s] src->buffer: %p, dst->buffer: %p\n", __func__, (void*)src->buffer, (void*)dst->buffer); + bool result = ggml_backend_buffer_copy_tensor(src, dst); + // output serialization format: | result (1 byte) | + output.resize(1, 0); + output[0] = result; + ggml_free(ctx); +} + +static struct ggml_tensor * create_node(uint64_t id, + struct ggml_context * ctx, + const std::unordered_map & tensor_ptrs, + std::unordered_map & tensor_map) { + if (id == 0) { + return nullptr; + } + if (tensor_map.find(id) != tensor_map.end()) { + return tensor_map[id]; + } + const rpc_tensor * tensor = tensor_ptrs.at(id); + struct ggml_tensor * result = deserialize_tensor(ctx, tensor); + tensor_map[id] = result; + for (int i = 0; i < GGML_MAX_SRC; i++) { + result->src[i] = create_node(tensor->src[i], ctx, tensor_ptrs, tensor_map); + } + result->view_src = create_node(tensor->view_src, ctx, tensor_ptrs, tensor_map); + result->view_offs = tensor->view_offs; + return result; +} + +static void rpc_graph_compute(ggml_backend_t backend, const std::vector & input, std::vector & output) { + // serialization format: + // | n_nodes (4 bytes) | nodes (n_nodes * sizeof(uint64_t) | n_tensors (4 bytes) | tensors (n_tensors * sizeof(rpc_tensor)) | + uint32_t n_nodes; + memcpy(&n_nodes, input.data(), sizeof(n_nodes)); + const uint64_t * nodes = (const uint64_t *)(input.data() + sizeof(n_nodes)); + uint32_t n_tensors; + memcpy(&n_tensors, input.data() + sizeof(n_nodes) + n_nodes*sizeof(uint64_t), sizeof(n_tensors)); + const rpc_tensor * tensors = 
(const rpc_tensor *)(input.data() + sizeof(n_nodes) + n_nodes*sizeof(uint64_t) + sizeof(n_tensors)); + GGML_PRINT_DEBUG("[%s] n_nodes: %u, n_tensors: %u\n", __func__, n_nodes, n_tensors); + + static size_t buf_size = ggml_tensor_overhead()*(n_nodes + n_tensors) + ggml_graph_overhead_custom(n_nodes, false); + struct ggml_init_params params = { + /*.mem_size =*/ buf_size, + /*.mem_buffer =*/ NULL, + /*.no_alloc =*/ true, + }; + struct ggml_context * ctx = ggml_init(params); + struct ggml_cgraph * graph = ggml_new_graph_custom(ctx, n_nodes, false); + graph->n_nodes = n_nodes; + std::unordered_map tensor_ptrs; + for (uint32_t i = 0; i < n_tensors; i++) { + tensor_ptrs[tensors[i].id] = &tensors[i]; + } + std::unordered_map tensor_map; + for (uint32_t i = 0; i < n_nodes; i++) { + graph->nodes[i] = create_node(nodes[i], ctx, tensor_ptrs, tensor_map); + } + ggml_status status = ggml_backend_graph_compute(backend, graph); + // output serialization format: | status (1 byte) | + output.resize(1, 0); + output[0] = status; + ggml_free(ctx); +} + +static void rpc_serve_client(ggml_backend_t backend, sockfd_t sockfd, size_t free_mem, size_t total_mem) { + while (true) { + uint8_t cmd; + if (!recv_data(sockfd, &cmd, 1)) { + break; + } + std::vector input; + std::vector output; + uint64_t input_size; + if (!recv_data(sockfd, &input_size, sizeof(input_size))) { + break; + } + input.resize(input_size); + if (!recv_data(sockfd, input.data(), input_size)) { + break; + } + switch (cmd) { + case ALLOC_BUFFER: { + rpc_alloc_buffer(backend, input, output); + break; + } + case GET_ALIGNMENT: { + rpc_get_alignment(backend, output); + break; + } + case GET_MAX_SIZE: { + rpc_get_max_size(backend, output); + break; + } + case BUFFER_GET_BASE: { + rpc_buffer_get_base(input, output); + break; + } + case FREE_BUFFER: { + rpc_free_buffer(input); + break; + } + case BUFFER_CLEAR: { + rpc_buffer_clear(input); + break; + } + case SET_TENSOR: { + rpc_set_tensor(input); + break; + } + case GET_TENSOR: { + rpc_get_tensor(input, output); + break; + } + case COPY_TENSOR: { + rpc_copy_tensor(input, output); + break; + } + case GRAPH_COMPUTE: { + rpc_graph_compute(backend, input, output); + break; + } + case GET_DEVICE_MEMORY: { + // output serialization format: | free (8 bytes) | total (8 bytes) | + output.resize(2*sizeof(uint64_t), 0); + memcpy(output.data(), &free_mem, sizeof(free_mem)); + memcpy(output.data() + sizeof(uint64_t), &total_mem, sizeof(total_mem)); + break; + } + default: { + fprintf(stderr, "Unknown command: %d\n", cmd); + return; + } + } + uint64_t output_size = output.size(); + if (!send_data(sockfd, &output_size, sizeof(output_size))) { + break; + } + if (!send_data(sockfd, output.data(), output_size)) { + break; + } + } +} + +void start_rpc_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem) { + std::string host; + int port; + if (!parse_endpoint(endpoint, host, port)) { + return; + } +#ifdef _WIN32 + { + WSADATA wsaData; + int res = WSAStartup(MAKEWORD(2, 2), &wsaData); + if (res != 0) { + fprintf(stderr, "WSAStartup failed: %d\n", res); + return; + } + } +#endif + auto server_socket = create_server_socket(host.c_str(), port); + if (server_socket == nullptr) { + fprintf(stderr, "Failed to create server socket\n"); + return; + } + while (true) { + auto client_socket = socket_accept(server_socket->fd); + if (client_socket == nullptr) { + fprintf(stderr, "Failed to accept client connection\n"); + return; + } + printf("Accepted client connection, free_mem=%zu, total_mem=%zu\n", 
free_mem, total_mem); + rpc_serve_client(backend, client_socket->fd, free_mem, total_mem); + printf("Client connection closed\n"); + } +#ifdef _WIN32 + WSACleanup(); +#endif +} diff --git a/ggml-rpc.h b/ggml-rpc.h new file mode 100644 index 000000000..aa144832a --- /dev/null +++ b/ggml-rpc.h @@ -0,0 +1,24 @@ +#pragma once + +#include "ggml.h" +#include "ggml-backend.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define GGML_RPC_MAX_SERVERS 16 + +// backend API +GGML_API GGML_CALL ggml_backend_t ggml_backend_rpc_init(const char * endpoint); +GGML_API GGML_CALL bool ggml_backend_is_rpc(ggml_backend_t backend); + +GGML_API GGML_CALL ggml_backend_buffer_type_t ggml_backend_rpc_buffer_type(const char * endpoint); + +GGML_API GGML_CALL void ggml_backend_rpc_get_device_memory(const char * endpoint, size_t * free, size_t * total); + +GGML_API GGML_CALL void start_rpc_server(ggml_backend_t backend, const char * endpoint, size_t free_mem, size_t total_mem); + +#ifdef __cplusplus +} +#endif diff --git a/llama.cpp b/llama.cpp index ad35e4a2e..7d26966e4 100644 --- a/llama.cpp +++ b/llama.cpp @@ -7,6 +7,10 @@ #include "ggml-alloc.h" #include "ggml-backend.h" +#ifdef GGML_USE_RPC +# include "ggml-rpc.h" +#endif + #ifdef GGML_USE_CUDA # include "ggml-cuda.h" #elif defined(GGML_USE_CLBLAST) @@ -1685,91 +1689,6 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer GGML_UNUSED(host_buffer); } -static ggml_backend_buffer_type_t llama_default_buffer_type_offload(int gpu) { - ggml_backend_buffer_type_t buft = nullptr; - -#ifdef GGML_USE_METAL - buft = ggml_backend_metal_buffer_type(); -#elif defined(GGML_USE_CUDA) - buft = ggml_backend_cuda_buffer_type(gpu); -#elif defined(GGML_USE_VULKAN) - buft = ggml_backend_vk_buffer_type(gpu); -#elif defined(GGML_USE_SYCL) - buft = ggml_backend_sycl_buffer_type(gpu); -#elif defined(GGML_USE_CLBLAST) - buft = ggml_backend_opencl_buffer_type(); -#elif defined(GGML_USE_KOMPUTE) - buft = ggml_backend_kompute_buffer_type(gpu); - if (buft == nullptr) { - LLAMA_LOG_WARN("%s: cannot use GPU %d, check `vulkaninfo --summary`\n", __func__, gpu); - } -#endif - - if (buft == nullptr) { - buft = llama_default_buffer_type_cpu(true); - } - return buft; - - GGML_UNUSED(gpu); -} - -static ggml_backend_buffer_type_t llama_default_buffer_type_split(int fallback_gpu, const float * tensor_split) { - ggml_backend_buffer_type_t buft = nullptr; - -#ifdef GGML_USE_CUDA - if (ggml_backend_cuda_get_device_count() > 1) { - buft = ggml_backend_cuda_split_buffer_type(tensor_split); - } -#endif - -#ifdef GGML_USE_SYCL - if (ggml_backend_sycl_get_device_count() > 1) { - buft = ggml_backend_sycl_split_buffer_type(tensor_split); - } -#endif - - if (buft == nullptr) { - buft = llama_default_buffer_type_offload(fallback_gpu); - } - return buft; - - GGML_UNUSED(tensor_split); -} - -static size_t llama_get_device_count() { -#if defined(GGML_USE_CUDA) - return ggml_backend_cuda_get_device_count(); -#elif defined(GGML_USE_SYCL) - return ggml_backend_sycl_get_device_count(); -#elif defined(GGML_USE_VULKAN) - return ggml_backend_vk_get_device_count(); -#else - return 1; -#endif -} - -static size_t llama_get_device_memory(int device) { -#if defined(GGML_USE_CUDA) - size_t total; - size_t free; - ggml_backend_cuda_get_device_memory(device, &free, &total); - return free; -#elif defined(GGML_USE_SYCL) - size_t total; - size_t free; - ggml_backend_sycl_get_device_memory(device, &free, &total); - return free; -#elif defined(GGML_USE_VULKAN) - size_t total; - size_t free; - 
ggml_backend_vk_get_device_memory(device, &free, &total); - return free; -#else - return 1; - GGML_UNUSED(device); -#endif -} - // // globals // @@ -2210,6 +2129,8 @@ struct llama_model { int main_gpu; int n_gpu_layers; + std::vector rpc_servers; + // gguf metadata std::unordered_map gguf_kv; @@ -2353,6 +2274,104 @@ struct llama_context { #endif }; +static ggml_backend_buffer_type_t llama_default_buffer_type_offload(const llama_model & model, int gpu) { + ggml_backend_buffer_type_t buft = nullptr; + +#ifdef GGML_USE_RPC + std::string endpoint = model.rpc_servers[gpu]; + buft = ggml_backend_rpc_buffer_type(endpoint.c_str()); +#elif defined(GGML_USE_METAL) + buft = ggml_backend_metal_buffer_type(); +#elif defined(GGML_USE_CUDA) + buft = ggml_backend_cuda_buffer_type(gpu); +#elif defined(GGML_USE_VULKAN) + buft = ggml_backend_vk_buffer_type(gpu); +#elif defined(GGML_USE_SYCL) + buft = ggml_backend_sycl_buffer_type(gpu); +#elif defined(GGML_USE_CLBLAST) + buft = ggml_backend_opencl_buffer_type(); +#elif defined(GGML_USE_KOMPUTE) + buft = ggml_backend_kompute_buffer_type(gpu); + if (buft == nullptr) { + LLAMA_LOG_WARN("%s: cannot use GPU %d, check `vulkaninfo --summary`\n", __func__, gpu); + } +#endif + + if (buft == nullptr) { + buft = llama_default_buffer_type_cpu(true); + } + return buft; + GGML_UNUSED(model); + GGML_UNUSED(gpu); +} + +static ggml_backend_buffer_type_t llama_default_buffer_type_split(const llama_model & model, int fallback_gpu, const float * tensor_split) { + ggml_backend_buffer_type_t buft = nullptr; + +#ifdef GGML_USE_CUDA + if (ggml_backend_cuda_get_device_count() > 1) { + buft = ggml_backend_cuda_split_buffer_type(tensor_split); + } +#endif + +#ifdef GGML_USE_SYCL + if (ggml_backend_sycl_get_device_count() > 1) { + buft = ggml_backend_sycl_split_buffer_type(tensor_split); + } +#endif + + if (buft == nullptr) { + buft = llama_default_buffer_type_offload(model, fallback_gpu); + } + return buft; + + GGML_UNUSED(tensor_split); +} + +static size_t llama_get_device_count(const llama_model & model) { +#if defined(GGML_USE_RPC) + return model.rpc_servers.size(); +#elif defined(GGML_USE_CUDA) + return ggml_backend_cuda_get_device_count(); +#elif defined(GGML_USE_SYCL) + return ggml_backend_sycl_get_device_count(); +#elif defined(GGML_USE_VULKAN) + return ggml_backend_vk_get_device_count(); +#else + return 1; +#endif + GGML_UNUSED(model); +} + +static size_t llama_get_device_memory(const llama_model & model, int device) { +#if defined(GGML_USE_RPC) + size_t total; + size_t free; + std::string endpoint = model.rpc_servers[device]; + ggml_backend_rpc_get_device_memory(endpoint.c_str(), &free, &total); + return free; +#elif defined(GGML_USE_CUDA) + size_t total; + size_t free; + ggml_backend_cuda_get_device_memory(device, &free, &total); + return free; +#elif defined(GGML_USE_SYCL) + size_t total; + size_t free; + ggml_backend_sycl_get_device_memory(device, &free, &total); + return free; +#elif defined(GGML_USE_VULKAN) + size_t total; + size_t free; + ggml_backend_vk_get_device_memory(device, &free, &total); + return free; +#else + return 1; +#endif + GGML_UNUSED(model); + GGML_UNUSED(device); +} + // // kv cache helpers // @@ -4791,13 +4810,13 @@ static bool llm_load_tensors( if (split_mode == LLAMA_SPLIT_MODE_LAYER) { // calculate the split points - int device_count = llama_get_device_count(); + int device_count = llama_get_device_count(model); bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + device_count, [](float x) { return x == 0.0f; }); 
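    // Note (annotation, not part of the patch): splits holds one entry per device.
    // By default it is filled from each device's free memory and turned into
    // cumulative split points further down (outside the lines shown); each layer's
    // normalized index (i - i_gpu_start)/act_gpu_layers is then looked up with
    // std::upper_bound, so devices reporting more free memory receive
    // proportionally more layers. With the RPC backend this is what spreads layers
    // across the remote servers according to the memory they advertise.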
std::vector splits(device_count); if (all_zero) { // default split, by free memory for (int i = 0; i < device_count; ++i) { - splits[i] = llama_get_device_memory(i); + splits[i] = llama_get_device_memory(model, i); } } else { std::copy(tensor_split, tensor_split + device_count, splits.begin()); @@ -4817,35 +4836,35 @@ static bool llm_load_tensors( int act_gpu_layers = std::min(n_gpu_layers, (int)n_layer + 1); for (int64_t i = i_gpu_start; i < n_layer; ++i) { int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(i - i_gpu_start)/act_gpu_layers) - splits.begin(); - model.buft_layer[i] = llama_default_buffer_type_offload(layer_gpu); + model.buft_layer[i] = llama_default_buffer_type_offload(model, layer_gpu); } // assign the output layer if (n_gpu_layers > n_layer) { int layer_gpu = std::upper_bound(splits.begin(), splits.begin() + device_count, float(act_gpu_layers - 1)/act_gpu_layers) - splits.begin(); - model.buft_output = llama_default_buffer_type_offload(layer_gpu); + model.buft_output = llama_default_buffer_type_offload(model, layer_gpu); } else { model.buft_output = llama_default_buffer_type_cpu(true); } } else { ggml_backend_buffer_type_t split_buft; if (split_mode == LLAMA_SPLIT_MODE_ROW) { - split_buft = llama_default_buffer_type_split(main_gpu, tensor_split); + split_buft = llama_default_buffer_type_split(model, main_gpu, tensor_split); } else { // LLAMA_SPLIT_MODE_NONE or LLAMA_SPLIT_MODE_LAYER in backends where it is not supported - split_buft = llama_default_buffer_type_offload(main_gpu); + split_buft = llama_default_buffer_type_offload(model, main_gpu); } // assign the repeating layers for (int64_t i = i_gpu_start; i < n_layer; ++i) { model.buft_layer[i] = { split_buft, - llama_default_buffer_type_offload(main_gpu) + llama_default_buffer_type_offload(model, main_gpu) }; } // assign the output layer if (n_gpu_layers > n_layer) { model.buft_output = { split_buft, - llama_default_buffer_type_offload(main_gpu) + llama_default_buffer_type_offload(model, main_gpu) }; } else { model.buft_output = llama_default_buffer_type_cpu(true); @@ -15390,6 +15409,7 @@ struct llama_model_params llama_model_default_params() { /*.split_mode =*/ LLAMA_SPLIT_MODE_LAYER, /*.main_gpu =*/ 0, /*.tensor_split =*/ nullptr, + /*.rpc_servers =*/ nullptr, /*.progress_callback =*/ nullptr, /*.progress_callback_user_data =*/ nullptr, /*.kv_overrides =*/ nullptr, @@ -15460,7 +15480,9 @@ struct llama_model_quantize_params llama_model_quantize_default_params() { } size_t llama_max_devices(void) { -#if defined(GGML_USE_METAL) +#if defined(GGML_USE_RPC) + return GGML_RPC_MAX_SERVERS; +#elif defined(GGML_USE_METAL) return 1; #elif defined(GGML_USE_CUDA) return GGML_CUDA_MAX_DEVICES; @@ -15483,7 +15505,7 @@ bool llama_supports_mlock(void) { bool llama_supports_gpu_offload(void) { #if defined(GGML_USE_CUDA) || defined(GGML_USE_CLBLAST) || defined(GGML_USE_METAL) || defined(GGML_USE_VULKAN) || \ - defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) + defined(GGML_USE_SYCL) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_RPC) // Defined when llama.cpp is compiled with support for offloading model layers to GPU. 
return true; #else @@ -15546,7 +15568,17 @@ struct llama_model * llama_load_model_from_file( return true; }; } - + if (params.rpc_servers != nullptr) { + // split the servers set them into model->rpc_servers + std::string servers(params.rpc_servers); + size_t pos = 0; + while ((pos = servers.find(",")) != std::string::npos) { + std::string server = servers.substr(0, pos); + model->rpc_servers.push_back(server); + servers.erase(0, pos + 1); + } + model->rpc_servers.push_back(servers); + } int status = llama_model_load(path_model, *model, params); GGML_ASSERT(status <= 0); if (status < 0) { @@ -15693,7 +15725,17 @@ struct llama_context * llama_new_context_with_model( if (!hparams.vocab_only) { // initialize backends -#ifdef GGML_USE_METAL +#if defined(GGML_USE_RPC) + for (auto & server : model->rpc_servers) { + ggml_backend_t backend = ggml_backend_rpc_init(server.c_str()); + if (backend == nullptr) { + LLAMA_LOG_ERROR("%s: failed to connect RPC backend to %s\n", __func__, server.c_str()); + llama_free(ctx); + return nullptr; + } + ctx->backends.push_back(backend); + } +#elif defined(GGML_USE_METAL) if (model->n_gpu_layers > 0) { ctx->backend_metal = ggml_backend_metal_init(); if (ctx->backend_metal == nullptr) { @@ -15850,7 +15892,7 @@ struct llama_context * llama_new_context_with_model( // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary bool pipeline_parallel = - llama_get_device_count() > 1 && + llama_get_device_count(*model) > 1 && model->n_gpu_layers > (int)model->hparams.n_layer && model->split_mode == LLAMA_SPLIT_MODE_LAYER && params.offload_kqv; diff --git a/llama.h b/llama.h index 0b2e708d0..612e32c4e 100644 --- a/llama.h +++ b/llama.h @@ -242,6 +242,9 @@ extern "C" { // proportion of the model (layers or rows) to offload to each GPU, size: llama_max_devices() const float * tensor_split; + // comma separated list of RPC servers to use for offloading + const char * rpc_servers; + // Called with a progress value between 0.0 and 1.0. Pass NULL to disable. // If the provided progress_callback returns true, model loading continues. // If it returns false, model loading is immediately aborted. From 1265c670fd8e41e1947352c96c5179adda97fb2c Mon Sep 17 00:00:00 2001 From: Brian Date: Tue, 14 May 2024 23:10:39 +1000 Subject: [PATCH 14/51] Revert "move ndk code to a new library (#6951)" (#7282) This reverts commit efc8f767c8c8c749a245dd96ad4e2f37c164b54c. 
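Before the Android revert that follows, here is a rough end-to-end sketch of how the RPC pieces introduced above are meant to be used. It is not code from the patch: the backend choice (CPU), the addresses, the port and the memory figures are placeholders, and error handling is omitted. A worker process hands a locally initialized backend to start_rpc_server(), while the client lists the worker endpoints in llama_model_params.rpc_servers; llama_load_model_from_file() splits that string on commas and llama_new_context_with_model() connects one ggml_backend_rpc_init() backend per entry.

    // --- worker side (illustrative): expose a local backend over TCP ---
    #include "ggml-backend.h"
    #include "ggml-rpc.h"

    int main(void) {
        ggml_backend_t backend = ggml_backend_cpu_init();          // placeholder: any ggml backend works
        size_t free_mem  = 8ull  * 1024 * 1024 * 1024;             // memory advertised to clients
        size_t total_mem = 16ull * 1024 * 1024 * 1024;
        start_rpc_server(backend, "0.0.0.0:50052", free_mem, total_mem);  // blocks, serving clients
        ggml_backend_free(backend);                                // not reached in this sketch
        return 0;
    }

    // --- client side (illustrative fragment, requires "llama.h") ---
    struct llama_model_params mparams = llama_model_default_params();
    mparams.rpc_servers  = "192.168.1.10:50052,192.168.1.11:50052";  // comma-separated worker endpoints
    mparams.n_gpu_layers = 99;  // treat the RPC endpoints like GPUs and offload layers to them

On the wire, each request is a 1-byte command followed by a length-prefixed payload, and each reply is a length-prefixed payload, which is exactly what rpc_serve_client() reads and writes in its loop above.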
--- examples/llama.android/app/build.gradle.kts | 25 +++++++++- .../src/main/cpp}/CMakeLists.txt | 2 +- .../src/main/cpp/llama-android.cpp | 28 +++++------ .../src/main/java/com/example/llama/Llm.kt} | 8 +-- .../java/com/example/llama/MainViewModel.kt | 13 +++-- examples/llama.android/build.gradle.kts | 1 - examples/llama.android/llama/.gitignore | 1 - .../llama.android/llama/consumer-rules.pro | 0 .../llama.android/llama/proguard-rules.pro | 21 -------- .../llama/cpp/ExampleInstrumentedTest.kt | 24 --------- .../llama/src/main/AndroidManifest.xml | 4 -- .../llama/src/main/cpp/CMakeLists.txt | 49 ------------------- .../java/android/llama/cpp/ExampleUnitTest.kt | 17 ------- examples/llama.android/settings.gradle.kts | 1 - 14 files changed, 49 insertions(+), 145 deletions(-) rename examples/llama.android/{llama => app/src/main/cpp}/CMakeLists.txt (98%) rename examples/llama.android/{llama => app}/src/main/cpp/llama-android.cpp (92%) rename examples/llama.android/{llama/src/main/java/android/llama/cpp/LLamaAndroid.kt => app/src/main/java/com/example/llama/Llm.kt} (97%) delete mode 100644 examples/llama.android/llama/.gitignore delete mode 100644 examples/llama.android/llama/consumer-rules.pro delete mode 100644 examples/llama.android/llama/proguard-rules.pro delete mode 100644 examples/llama.android/llama/src/androidTest/java/android/llama/cpp/ExampleInstrumentedTest.kt delete mode 100644 examples/llama.android/llama/src/main/AndroidManifest.xml delete mode 100644 examples/llama.android/llama/src/main/cpp/CMakeLists.txt delete mode 100644 examples/llama.android/llama/src/test/java/android/llama/cpp/ExampleUnitTest.kt diff --git a/examples/llama.android/app/build.gradle.kts b/examples/llama.android/app/build.gradle.kts index 8d1b37195..d42140efe 100644 --- a/examples/llama.android/app/build.gradle.kts +++ b/examples/llama.android/app/build.gradle.kts @@ -7,6 +7,8 @@ android { namespace = "com.example.llama" compileSdk = 34 + ndkVersion = "26.1.10909125" + defaultConfig { applicationId = "com.example.llama" minSdk = 33 @@ -18,6 +20,17 @@ android { vectorDrawables { useSupportLibrary = true } + ndk { + // Add NDK properties if wanted, e.g. + // abiFilters += listOf("arm64-v8a") + } + externalNativeBuild { + cmake { + arguments += "-DCMAKE_BUILD_TYPE=Release" + cppFlags += listOf() + arguments += listOf() + } + } } buildTypes { @@ -42,6 +55,17 @@ android { composeOptions { kotlinCompilerExtensionVersion = "1.5.1" } + packaging { + resources { + excludes += "/META-INF/{AL2.0,LGPL2.1}" + } + } + externalNativeBuild { + cmake { + path = file("src/main/cpp/CMakeLists.txt") + version = "3.22.1" + } + } } dependencies { @@ -54,7 +78,6 @@ dependencies { implementation("androidx.compose.ui:ui-graphics") implementation("androidx.compose.ui:ui-tooling-preview") implementation("androidx.compose.material3:material3") - implementation(project(":llama")) testImplementation("junit:junit:4.13.2") androidTestImplementation("androidx.test.ext:junit:1.1.5") androidTestImplementation("androidx.test.espresso:espresso-core:3.5.1") diff --git a/examples/llama.android/llama/CMakeLists.txt b/examples/llama.android/app/src/main/cpp/CMakeLists.txt similarity index 98% rename from examples/llama.android/llama/CMakeLists.txt rename to examples/llama.android/app/src/main/cpp/CMakeLists.txt index bb5738ae3..85139329a 100644 --- a/examples/llama.android/llama/CMakeLists.txt +++ b/examples/llama.android/app/src/main/cpp/CMakeLists.txt @@ -37,7 +37,7 @@ FetchContent_MakeAvailable(llama) # used in the AndroidManifest.xml file. 
add_library(${CMAKE_PROJECT_NAME} SHARED # List C/C++ source files with relative paths to this CMakeLists.txt. - llama-android.cpp) + llama-android.cpp) # Specifies libraries CMake should link to your target library. You # can link libraries from various origins, such as libraries defined in this diff --git a/examples/llama.android/llama/src/main/cpp/llama-android.cpp b/examples/llama.android/app/src/main/cpp/llama-android.cpp similarity index 92% rename from examples/llama.android/llama/src/main/cpp/llama-android.cpp rename to examples/llama.android/app/src/main/cpp/llama-android.cpp index 874158ef0..4af9de303 100644 --- a/examples/llama.android/llama/src/main/cpp/llama-android.cpp +++ b/examples/llama.android/app/src/main/cpp/llama-android.cpp @@ -81,7 +81,7 @@ static void log_callback(ggml_log_level level, const char * fmt, void * data) { extern "C" JNIEXPORT jlong JNICALL -Java_android_llama_cpp_LLamaAndroid_load_1model(JNIEnv *env, jobject, jstring filename) { +Java_com_example_llama_Llm_load_1model(JNIEnv *env, jobject, jstring filename) { llama_model_params model_params = llama_model_default_params(); auto path_to_model = env->GetStringUTFChars(filename, 0); @@ -101,13 +101,13 @@ Java_android_llama_cpp_LLamaAndroid_load_1model(JNIEnv *env, jobject, jstring fi extern "C" JNIEXPORT void JNICALL -Java_android_llama_cpp_LLamaAndroid_free_1model(JNIEnv *, jobject, jlong model) { +Java_com_example_llama_Llm_free_1model(JNIEnv *, jobject, jlong model) { llama_free_model(reinterpret_cast(model)); } extern "C" JNIEXPORT jlong JNICALL -Java_android_llama_cpp_LLamaAndroid_new_1context(JNIEnv *env, jobject, jlong jmodel) { +Java_com_example_llama_Llm_new_1context(JNIEnv *env, jobject, jlong jmodel) { auto model = reinterpret_cast(jmodel); if (!model) { @@ -139,25 +139,25 @@ Java_android_llama_cpp_LLamaAndroid_new_1context(JNIEnv *env, jobject, jlong jmo extern "C" JNIEXPORT void JNICALL -Java_android_llama_cpp_LLamaAndroid_free_1context(JNIEnv *, jobject, jlong context) { +Java_com_example_llama_Llm_free_1context(JNIEnv *, jobject, jlong context) { llama_free(reinterpret_cast(context)); } extern "C" JNIEXPORT void JNICALL -Java_android_llama_cpp_LLamaAndroid_backend_1free(JNIEnv *, jobject) { +Java_com_example_llama_Llm_backend_1free(JNIEnv *, jobject) { llama_backend_free(); } extern "C" JNIEXPORT void JNICALL -Java_android_llama_cpp_LLamaAndroid_log_1to_1android(JNIEnv *, jobject) { +Java_com_example_llama_Llm_log_1to_1android(JNIEnv *, jobject) { llama_log_set(log_callback, NULL); } extern "C" JNIEXPORT jstring JNICALL -Java_android_llama_cpp_LLamaAndroid_bench_1model( +Java_com_example_llama_Llm_bench_1model( JNIEnv *env, jobject, jlong context_pointer, @@ -271,13 +271,13 @@ Java_android_llama_cpp_LLamaAndroid_bench_1model( extern "C" JNIEXPORT void JNICALL -Java_android_llama_cpp_LLamaAndroid_free_1batch(JNIEnv *, jobject, jlong batch_pointer) { +Java_com_example_llama_Llm_free_1batch(JNIEnv *, jobject, jlong batch_pointer) { llama_batch_free(*reinterpret_cast(batch_pointer)); } extern "C" JNIEXPORT jlong JNICALL -Java_android_llama_cpp_LLamaAndroid_new_1batch(JNIEnv *, jobject, jint n_tokens, jint embd, jint n_seq_max) { +Java_com_example_llama_Llm_new_1batch(JNIEnv *, jobject, jint n_tokens, jint embd, jint n_seq_max) { // Source: Copy of llama.cpp:llama_batch_init but heap-allocated. 
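Most of this revert is a mechanical rename, but the churn in the native symbols is worth a note: JNI derives the exported C function name from the Java/Kotlin package, class and method, so moving the class from android.llama.cpp.LLamaAndroid back to com.example.llama.Llm changes every Java_... symbol. Dots in the package become underscores, and a literal underscore in a method name is escaped as "_1", which is why new_batch surfaces as new_1batch. A minimal sketch with a made-up method name:

    // Kotlin side (hypothetical):
    //   package com.example.llama
    //   class Llm { private external fun my_method(n: Int): Int }
    #include <jni.h>

    extern "C" JNIEXPORT jint JNICALL
    Java_com_example_llama_Llm_my_1method(JNIEnv * /*env*/, jobject /*thiz*/, jint n) {
        // "my_method" -> "my_1method": the '_' in the Kotlin-side name is escaped as "_1"
        return n + 1;
    }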
@@ -313,19 +313,19 @@ Java_android_llama_cpp_LLamaAndroid_new_1batch(JNIEnv *, jobject, jint n_tokens, extern "C" JNIEXPORT void JNICALL -Java_android_llama_cpp_LLamaAndroid_backend_1init(JNIEnv *, jobject) { +Java_com_example_llama_Llm_backend_1init(JNIEnv *, jobject) { llama_backend_init(); } extern "C" JNIEXPORT jstring JNICALL -Java_android_llama_cpp_LLamaAndroid_system_1info(JNIEnv *env, jobject) { +Java_com_example_llama_Llm_system_1info(JNIEnv *env, jobject) { return env->NewStringUTF(llama_print_system_info()); } extern "C" JNIEXPORT jint JNICALL -Java_android_llama_cpp_LLamaAndroid_completion_1init( +Java_com_example_llama_Llm_completion_1init( JNIEnv *env, jobject, jlong context_pointer, @@ -376,7 +376,7 @@ Java_android_llama_cpp_LLamaAndroid_completion_1init( extern "C" JNIEXPORT jstring JNICALL -Java_android_llama_cpp_LLamaAndroid_completion_1loop( +Java_com_example_llama_Llm_completion_1loop( JNIEnv * env, jobject, jlong context_pointer, @@ -438,6 +438,6 @@ Java_android_llama_cpp_LLamaAndroid_completion_1loop( extern "C" JNIEXPORT void JNICALL -Java_android_llama_cpp_LLamaAndroid_kv_1cache_1clear(JNIEnv *, jobject, jlong context) { +Java_com_example_llama_Llm_kv_1cache_1clear(JNIEnv *, jobject, jlong context) { llama_kv_cache_clear(reinterpret_cast(context)); } diff --git a/examples/llama.android/llama/src/main/java/android/llama/cpp/LLamaAndroid.kt b/examples/llama.android/app/src/main/java/com/example/llama/Llm.kt similarity index 97% rename from examples/llama.android/llama/src/main/java/android/llama/cpp/LLamaAndroid.kt rename to examples/llama.android/app/src/main/java/com/example/llama/Llm.kt index 6c63e54e0..d86afee37 100644 --- a/examples/llama.android/llama/src/main/java/android/llama/cpp/LLamaAndroid.kt +++ b/examples/llama.android/app/src/main/java/com/example/llama/Llm.kt @@ -1,4 +1,4 @@ -package android.llama.cpp +package com.example.llama import android.util.Log import kotlinx.coroutines.CoroutineDispatcher @@ -10,7 +10,7 @@ import kotlinx.coroutines.withContext import java.util.concurrent.Executors import kotlin.concurrent.thread -class LLamaAndroid { +class Llm { private val tag: String? = this::class.simpleName private val threadLocalState: ThreadLocal = ThreadLocal.withInitial { State.Idle } @@ -165,8 +165,8 @@ class LLamaAndroid { } // Enforce only one instance of Llm. 
- private val _instance: LLamaAndroid = LLamaAndroid() + private val _instance: Llm = Llm() - fun instance(): LLamaAndroid = _instance + fun instance(): Llm = _instance } } diff --git a/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt b/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt index 45ac29938..be95e2221 100644 --- a/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt +++ b/examples/llama.android/app/src/main/java/com/example/llama/MainViewModel.kt @@ -1,6 +1,5 @@ package com.example.llama -import android.llama.cpp.LLamaAndroid import android.util.Log import androidx.compose.runtime.getValue import androidx.compose.runtime.mutableStateOf @@ -10,7 +9,7 @@ import androidx.lifecycle.viewModelScope import kotlinx.coroutines.flow.catch import kotlinx.coroutines.launch -class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instance()): ViewModel() { +class MainViewModel(private val llm: Llm = Llm.instance()): ViewModel() { companion object { @JvmStatic private val NanosPerSecond = 1_000_000_000.0 @@ -29,7 +28,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan viewModelScope.launch { try { - llamaAndroid.unload() + llm.unload() } catch (exc: IllegalStateException) { messages += exc.message!! } @@ -45,7 +44,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan messages += "" viewModelScope.launch { - llamaAndroid.send(text) + llm.send(text) .catch { Log.e(tag, "send() failed", it) messages += it.message!! @@ -58,7 +57,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan viewModelScope.launch { try { val start = System.nanoTime() - val warmupResult = llamaAndroid.bench(pp, tg, pl, nr) + val warmupResult = llm.bench(pp, tg, pl, nr) val end = System.nanoTime() messages += warmupResult @@ -71,7 +70,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan return@launch } - messages += llamaAndroid.bench(512, 128, 1, 3) + messages += llm.bench(512, 128, 1, 3) } catch (exc: IllegalStateException) { Log.e(tag, "bench() failed", exc) messages += exc.message!! 
@@ -82,7 +81,7 @@ class MainViewModel(private val llamaAndroid: LLamaAndroid = LLamaAndroid.instan fun load(pathToModel: String) { viewModelScope.launch { try { - llamaAndroid.load(pathToModel) + llm.load(pathToModel) messages += "Loaded $pathToModel" } catch (exc: IllegalStateException) { Log.e(tag, "load() failed", exc) diff --git a/examples/llama.android/build.gradle.kts b/examples/llama.android/build.gradle.kts index acd1ada7d..50ebc8211 100644 --- a/examples/llama.android/build.gradle.kts +++ b/examples/llama.android/build.gradle.kts @@ -2,5 +2,4 @@ plugins { id("com.android.application") version "8.2.0" apply false id("org.jetbrains.kotlin.android") version "1.9.0" apply false - id("com.android.library") version "8.2.0" apply false } diff --git a/examples/llama.android/llama/.gitignore b/examples/llama.android/llama/.gitignore deleted file mode 100644 index 796b96d1c..000000000 --- a/examples/llama.android/llama/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/build diff --git a/examples/llama.android/llama/consumer-rules.pro b/examples/llama.android/llama/consumer-rules.pro deleted file mode 100644 index e69de29bb..000000000 diff --git a/examples/llama.android/llama/proguard-rules.pro b/examples/llama.android/llama/proguard-rules.pro deleted file mode 100644 index f1b424510..000000000 --- a/examples/llama.android/llama/proguard-rules.pro +++ /dev/null @@ -1,21 +0,0 @@ -# Add project specific ProGuard rules here. -# You can control the set of applied configuration files using the -# proguardFiles setting in build.gradle. -# -# For more details, see -# http://developer.android.com/guide/developing/tools/proguard.html - -# If your project uses WebView with JS, uncomment the following -# and specify the fully qualified class name to the JavaScript interface -# class: -#-keepclassmembers class fqcn.of.javascript.interface.for.webview { -# public *; -#} - -# Uncomment this to preserve the line number information for -# debugging stack traces. -#-keepattributes SourceFile,LineNumberTable - -# If you keep the line number information, uncomment this to -# hide the original source file name. -#-renamesourcefileattribute SourceFile diff --git a/examples/llama.android/llama/src/androidTest/java/android/llama/cpp/ExampleInstrumentedTest.kt b/examples/llama.android/llama/src/androidTest/java/android/llama/cpp/ExampleInstrumentedTest.kt deleted file mode 100644 index 05d6ab5d2..000000000 --- a/examples/llama.android/llama/src/androidTest/java/android/llama/cpp/ExampleInstrumentedTest.kt +++ /dev/null @@ -1,24 +0,0 @@ -package android.llama.cpp - -import androidx.test.platform.app.InstrumentationRegistry -import androidx.test.ext.junit.runners.AndroidJUnit4 - -import org.junit.Test -import org.junit.runner.RunWith - -import org.junit.Assert.* - -/** - * Instrumented test, which will execute on an Android device. - * - * See [testing documentation](http://d.android.com/tools/testing). - */ -@RunWith(AndroidJUnit4::class) -class ExampleInstrumentedTest { - @Test - fun useAppContext() { - // Context of the app under test. 
- val appContext = InstrumentationRegistry.getInstrumentation().targetContext - assertEquals("android.llama.cpp.test", appContext.packageName) - } -} diff --git a/examples/llama.android/llama/src/main/AndroidManifest.xml b/examples/llama.android/llama/src/main/AndroidManifest.xml deleted file mode 100644 index 8bdb7e14b..000000000 --- a/examples/llama.android/llama/src/main/AndroidManifest.xml +++ /dev/null @@ -1,4 +0,0 @@ - - - - diff --git a/examples/llama.android/llama/src/main/cpp/CMakeLists.txt b/examples/llama.android/llama/src/main/cpp/CMakeLists.txt deleted file mode 100644 index 42ebaad49..000000000 --- a/examples/llama.android/llama/src/main/cpp/CMakeLists.txt +++ /dev/null @@ -1,49 +0,0 @@ -# For more information about using CMake with Android Studio, read the -# documentation: https://d.android.com/studio/projects/add-native-code.html. -# For more examples on how to use CMake, see https://github.com/android/ndk-samples. - -# Sets the minimum CMake version required for this project. -cmake_minimum_required(VERSION 3.22.1) - -# Declares the project name. The project name can be accessed via ${ PROJECT_NAME}, -# Since this is the top level CMakeLists.txt, the project name is also accessible -# with ${CMAKE_PROJECT_NAME} (both CMake variables are in-sync within the top level -# build script scope). -project("llama-android") - -include(FetchContent) -FetchContent_Declare( - llama - GIT_REPOSITORY https://github.com/ggerganov/llama.cpp - GIT_TAG master -) - -# Also provides "common" -FetchContent_MakeAvailable(llama) - -# Creates and names a library, sets it as either STATIC -# or SHARED, and provides the relative paths to its source code. -# You can define multiple libraries, and CMake builds them for you. -# Gradle automatically packages shared libraries with your APK. -# -# In this top level CMakeLists.txt, ${CMAKE_PROJECT_NAME} is used to define -# the target library name; in the sub-module's CMakeLists.txt, ${PROJECT_NAME} -# is preferred for the same purpose. -# -# In order to load a library into your app from Java/Kotlin, you must call -# System.loadLibrary() and pass the name of the library defined here; -# for GameActivity/NativeActivity derived applications, the same library name must be -# used in the AndroidManifest.xml file. -add_library(${CMAKE_PROJECT_NAME} SHARED - # List C/C++ source files with relative paths to this CMakeLists.txt. - llama-android.cpp) - -# Specifies libraries CMake should link to your target library. You -# can link libraries from various origins, such as libraries defined in this -# build script, prebuilt third-party libraries, or Android system libraries. -target_link_libraries(${CMAKE_PROJECT_NAME} - # List libraries link to the target library - llama - common - android - log) diff --git a/examples/llama.android/llama/src/test/java/android/llama/cpp/ExampleUnitTest.kt b/examples/llama.android/llama/src/test/java/android/llama/cpp/ExampleUnitTest.kt deleted file mode 100644 index cbbb974d3..000000000 --- a/examples/llama.android/llama/src/test/java/android/llama/cpp/ExampleUnitTest.kt +++ /dev/null @@ -1,17 +0,0 @@ -package android.llama.cpp - -import org.junit.Test - -import org.junit.Assert.* - -/** - * Example local unit test, which will execute on the development machine (host). - * - * See [testing documentation](http://d.android.com/tools/testing). 
- */ -class ExampleUnitTest { - @Test - fun addition_isCorrect() { - assertEquals(4, 2 + 2) - } -} diff --git a/examples/llama.android/settings.gradle.kts b/examples/llama.android/settings.gradle.kts index c7c1a034a..2ba32c4fa 100644 --- a/examples/llama.android/settings.gradle.kts +++ b/examples/llama.android/settings.gradle.kts @@ -15,4 +15,3 @@ dependencyResolutionManagement { rootProject.name = "LlamaAndroid" include(":app") -include(":llama") From 4f0263633b40e94e8b69fd6e7e4395cfedfd5c12 Mon Sep 17 00:00:00 2001 From: Steve Grubb Date: Tue, 14 May 2024 10:11:24 -0400 Subject: [PATCH 15/51] server: free sampling contexts on exit (#7264) * server: free sampling contexts on exit This cleans up last leak found by the address sanitizer. * fix whitespace * fix whitespace --- examples/server/server.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index ceaeb1f76..7e0d068f8 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -671,6 +671,13 @@ struct server_context { model = nullptr; } + // Clear any sampling context + for (server_slot & slot : slots) { + if (slot.ctx_sampling != nullptr) { + llama_sampling_free(slot.ctx_sampling); + } + } + llama_batch_free(batch); } From 0d26d8ccd8caebab75af697c0275f599075fdacf Mon Sep 17 00:00:00 2001 From: Hong Bo PENG Date: Sun, 12 May 2024 17:17:18 +0800 Subject: [PATCH 16/51] ggml : optimize for ppc64le using VSX intrinsics (ggml/784) * optimize for ppc64le using VSX intrinsics * 1. code clean up by removing comments about overflow concern. 2. fix typo in suffix of scaling. * Continue to fix typo in suffix of scaling for QK_K <> 256 --------- Co-authored-by: Georgi Gerganov --- ggml-quants.c | 2169 ++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 2167 insertions(+), 2 deletions(-) diff --git a/ggml-quants.c b/ggml-quants.c index 00334c5fe..f711bd013 100644 --- a/ggml-quants.c +++ b/ggml-quants.c @@ -241,7 +241,7 @@ static inline float hsum_float_4x4(const __m128 a, const __m128 b, const __m128 #endif // __AVX__ || __AVX2__ || __AVX512F__ #endif // defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__) -#if defined(__ARM_NEON) || defined(__wasm_simd128__) +#if defined(__ARM_NEON) || defined(__wasm_simd128__) || defined(__POWER9_VECTOR__) #define B1(c,s,n) 0x ## n ## c , 0x ## n ## s #define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s) #define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s) @@ -643,6 +643,38 @@ void quantize_row_q8_0(const float * restrict x, void * restrict vy, int64_t k) // store result __riscv_vse8_v_i8m1(y[i].qs , vs, vl); } +#elif defined(__POWER9_VECTOR__) + for (int i = 0; i < nb; i++) { + vector float srcv [8]; + vector float asrcv[8]; + vector float amaxv[8]; + vector signed int vi[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(vec_extract(amaxv[0], 0), + vec_extract(amaxv[0], 1)), + MAX(vec_extract(amaxv[0], 2), + vec_extract(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 
1.0f/d : 0.0f; + const vector float vid = vec_splats(id); + + y[i].d = GGML_FP32_TO_FP16(d); + + for (int j = 0; j < 8; j++) { + const vector float v = vec_round(vec_mul(srcv[j], vid)); + vi[j] = vec_cts(v, 0); + } + vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); + vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); + } #else GGML_UNUSED(nb); // scalar @@ -898,6 +930,46 @@ void quantize_row_q8_1(const float * restrict x, void * restrict vy, int64_t k) int sum = __riscv_vmv_x_s_i16m1_i16(vwrs); y[i].s = GGML_FP32_TO_FP16(sum*d); } +#elif defined(__POWER9_VECTOR__) + for (int i = 0; i < nb; i++) { + vector float srcv [8]; + vector float asrcv[8]; + vector float amaxv[8]; + vector signed int vi[8]; + + for (int j = 0; j < 8; j++) srcv[j] = vec_xl(0, x + i*32 + 4*j); + for (int j = 0; j < 8; j++) asrcv[j] = vec_abs(srcv[j]); + + for (int j = 0; j < 4; j++) amaxv[2*j] = vec_max(asrcv[2*j], asrcv[2*j+1]); + for (int j = 0; j < 2; j++) amaxv[4*j] = vec_max(amaxv[4*j], amaxv[4*j+2]); + for (int j = 0; j < 1; j++) amaxv[8*j] = vec_max(amaxv[8*j], amaxv[8*j+4]); + + const float amax = MAX(MAX(vec_extract(amaxv[0], 0), + vec_extract(amaxv[0], 1)), + MAX(vec_extract(amaxv[0], 2), + vec_extract(amaxv[0], 3))); + + const float d = amax / ((1 << 7) - 1); + const float id = d ? 1.0f/d : 0.0f; + const vector float vid = vec_splats(id); + + y[i].d = GGML_FP32_TO_FP16(d); + + vector int accv = vec_splats(0); + + for (int j = 0; j < 8; j++) { + const vector float v = vec_round(vec_mul(srcv[j], vid)); + vi[j] = vec_cts(v, 0); + + accv = vec_add(accv, vi[j]); + } + vec_xst(vec_pack(vec_pack(vi[0], vi[1]), vec_pack(vi[2], vi[3])), 0, &y[i].qs[0]); + vec_xst(vec_pack(vec_pack(vi[4], vi[5]), vec_pack(vi[6], vi[7])), 16, &y[i].qs[0]); + + accv = vec_add(accv, vec_sld(accv, accv, 4)); + accv = vec_add(accv, vec_sld(accv, accv, 8)); + y[i].s = GGML_FP32_TO_FP16(d * vec_extract(accv, 0)); + } #else GGML_UNUSED(nb); // scalar @@ -3740,6 +3812,46 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector signed char v8 = vec_splats((signed char)0x8); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; i++) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl(16, y[i].qs); + + vector signed char q4x0 = vec_and(qxs, lowMask); + vector signed char q4x1 = vec_sr(qxs, v4); + + q4x0 = vec_sub(q4x0, v8); + q4x1 = vec_sub(q4x1, v8); + + vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); + + qv0 = vec_add(qv0, qv1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -3958,6 +4070,46 @@ void 
ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; i++) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].m)); + vector float vys = {GGML_FP16_TO_FP32(y[i].s), 0.0f, 0.0f, 0.0f}; + vsumf0 = vec_madd(vxmin, vys, vsumf0); + + vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl(16, y[i].qs); + + vector signed char q4x0 = vec_and(qxs, lowMask); + vector signed char q4x1 = vec_sr(qxs, v4); + + vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); + + qv0 = vec_add(qv0, qv1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -4243,6 +4395,49 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)4); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed long long aux64x2_0 = {(uint64_t)(table_b2b_1[x[i].qh[0]]), (uint64_t)(table_b2b_1[x[i].qh[1]])}; + vector signed long long aux64x2_1 = {(uint64_t)(table_b2b_1[x[i].qh[2]]), (uint64_t)(table_b2b_1[x[i].qh[3]])}; + + vector signed char qh0 = (vector signed char)aux64x2_0; + vector signed char qh1 = (vector signed char)aux64x2_1; + + vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs); + + vector signed char q5x0 = vec_sub(vec_and (qxs, lowMask), qh0); + vector signed char q5x1 = vec_sub(vec_sr(qxs, v4), qh1); + + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl( 16, y[i].qs); + + vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1)); + + qv0 = vec_add(qv0, qv1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -4547,6 +4742,53 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned 
char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].m)); + vector float vys = {GGML_FP16_TO_FP32(y[i].s), 0.f, 0.f, 0.f}; + vsumf0 = vec_madd(vxmin, vys, vsumf0); + + vector unsigned long long aux64x2_0 = {(uint64_t)(table_b2b_0[x[i].qh[0]]), (uint64_t)(table_b2b_0[x[i].qh[1]])}; + vector unsigned long long aux64x2_1 = {(uint64_t)(table_b2b_0[x[i].qh[2]]), (uint64_t)(table_b2b_0[x[i].qh[3]])}; + + vector signed char qh0 = (vector signed char)aux64x2_0; + vector signed char qh1 = (vector signed char)aux64x2_1; + + vector signed char qxs = (vector signed char)vec_xl( 0, x[i].qs); + + vector signed char q5x0 = vec_or(vec_and(qxs, lowMask), qh0); + vector signed char q5x1 = vec_or(vec_sr(qxs, v4), qh1); + + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl( 16, y[i].qs); + + vector signed short qv0 = vec_add(vec_mule(q5x0, q8y0), vec_mulo(q5x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q5x1, q8y1), vec_mulo(q5x1, q8y1)); + + qv0 = vec_add(qv0, qv1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -4722,6 +4964,45 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r } *s = sumf; +#elif defined(__POWER9_VECTOR__) + vector float vsumf0 = vec_splats(0.0f); + +#pragma GCC unroll 4 + for (int i = 0; i < nb; i++) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[i].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed char q8x0 = vec_xl( 0, x[i].qs); + vector signed char q8x1 = vec_xl(16, x[i].qs); + vector signed char q8y0 = vec_xl( 0, y[i].qs); + vector signed char q8y1 = vec_xl(16, y[i].qs); + + vector signed short qv0 = vec_mule(q8x0, q8y0); + vector signed short qv1 = vec_mulo(q8x0, q8y0); + vector signed short qv2 = vec_mule(q8x1, q8y1); + vector signed short qv3 = vec_mulo(q8x1, q8y1); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackh(qv1)); + vector signed int vsumi1 = vec_add(vec_unpackl(qv0), vec_unpackl(qv1)); + vector signed int vsumi2 = vec_add(vec_unpackh(qv2), vec_unpackh(qv3)); + vector signed int vsumi3 = vec_add(vec_unpackl(qv2), vec_unpackl(qv3)); + + vsumi0 = vec_add(vsumi0, vsumi2); + vsumi1 = vec_add(vsumi1, vsumi3); + + vsumi0 = vec_add(vsumi0, vsumi1); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + } + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar float sumf = 0.0; @@ -5077,6 +5358,147 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char lowScaleMask = vec_splats((signed char)0xF); + const vector unsigned 
char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + vector signed char q2xmins = (vector signed char)vec_xl( 0, x[i].scales); + vector signed char vscales = vec_and(q2xmins, lowScaleMask); + + q2xmins = vec_sr(q2xmins, v4); + vector signed short q2xmins0 = vec_unpackh(q2xmins); + vector signed short q2xmins1 = vec_unpackl(q2xmins); + + vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); + vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); + vector signed int prod2 = vec_mule(q2xmins1, q8ysums1); + vector signed int prod3 = vec_mulo(q2xmins1, q8ysums1); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); + vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q2 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/128; ++j) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q2); + vector signed char qxs1 = (vector signed char)vec_xl(16, q2); + q2 += 32; + + vector signed char q2x00 = vec_and(qxs0, lowMask); + vector signed char q2x01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char q2x02 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char q2x03 = vec_and(vec_sr(qxs0, v6), lowMask); + vector signed char q2x10 = vec_and(qxs1, lowMask); + vector signed char q2x11 = vec_and(vec_sr(qxs1, v2), lowMask); + vector signed char q2x12 = vec_and(vec_sr(qxs1, v4), lowMask); + vector signed char q2x13 = vec_and(vec_sr(qxs1, v6), lowMask); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y01 = vec_xl( 32, q8); + vector signed char q8y11 = vec_xl( 48, q8); + vector signed char q8y02 = vec_xl( 64, q8); + vector signed char q8y12 = vec_xl( 80, q8); + vector signed char q8y03 = vec_xl( 96, q8); + vector signed char q8y13 = vec_xl(112, q8); + q8 += 128; + + vector signed short qv0 = vec_add(vec_mule(q2x00, q8y00), vec_mulo(q2x00, q8y00)); + vector signed short qv1 = vec_add(vec_mule(q2x01, q8y01), vec_mulo(q2x01, q8y01)); + vector signed short qv2 = vec_add(vec_mule(q2x02, q8y02), vec_mulo(q2x02, q8y02)); + vector signed short qv3 = vec_add(vec_mule(q2x03, q8y03), vec_mulo(q2x03, q8y03)); + vector signed short qv4 = vec_add(vec_mule(q2x10, q8y10), vec_mulo(q2x10, q8y10)); 
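+        // Note (annotation, not from the patch): vec_mule/vec_mulo multiply the even and
+        // odd byte lanes into widened 16-bit products, so adding the two vectors yields
+        // eight adjacent-pair dot products per call without overflowing int16 for the
+        // small value ranges of these quantized blocks. Each kernel finishes by rotating
+        // the float accumulator with vec_sld by 4 and then 8 bytes and adding, which
+        // leaves the horizontal sum in every lane before vec_extract(vsumf0, 0) reads it.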
+ vector signed short qv5 = vec_add(vec_mule(q2x11, q8y11), vec_mulo(q2x11, q8y11)); + vector signed short qv6 = vec_add(vec_mule(q2x12, q8y12), vec_mulo(q2x12, q8y12)); + vector signed short qv7 = vec_add(vec_mule(q2x13, q8y13), vec_mulo(q2x13, q8y13)); + + vector signed short vscales_h = vec_unpackh(vscales); + vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed short vs3 = vec_splat(vscales_h, 3); + vector signed short vs4 = vec_splat(vscales_h, 4); + vector signed short vs5 = vec_splat(vscales_h, 5); + vector signed short vs6 = vec_splat(vscales_h, 6); + vector signed short vs7 = vec_splat(vscales_h, 7); + vscales = vec_sld(vscales, vscales, 8); + + qv0 = vec_mul(qv0, vs0); + qv1 = vec_mul(qv1, vs2); + qv2 = vec_mul(qv2, vs4); + qv3 = vec_mul(qv3, vs6); + + qv0 = vec_madd(qv4, vs1, qv0); + qv1 = vec_madd(qv5, vs3, qv1); + qv2 = vec_madd(qv6, vs5, qv2); + qv3 = vec_madd(qv7, vs7, qv3); + + vsumi0 = vec_add(vec_unpackh(qv0), vsumi0); + vsumi1 = vec_add(vec_unpackh(qv1), vsumi1); + vsumi2 = vec_add(vec_unpackh(qv2), vsumi2); + vsumi3 = vec_add(vec_unpackh(qv3), vsumi3); + + vsumi4 = vec_add(vec_unpackl(qv0), vsumi4); + vsumi5 = vec_add(vec_unpackl(qv1), vsumi5); + vsumi6 = vec_add(vec_unpackl(qv2), vsumi6); + vsumi7 = vec_add(vec_unpackl(qv3), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else float sumf = 0; @@ -5347,6 +5769,87 @@ void ggml_vec_dot_q2_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char lowScaleMask = vec_splats((signed char)0xF); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + vector signed short q8ysums0 = vec_xl_len(y[i].bsums, 8); + + vector signed char q2xmins = (vector signed char)vec_xl_len(x[i].scales, 4); + vector signed char vscales = vec_and(q2xmins, lowScaleMask); + + q2xmins = vec_sr(q2xmins, v4); + vector signed short q2xmins0 = vec_unpackh((vector signed char)q2xmins); + + vector signed int prod0 = vec_mule(q2xmins0, q8ysums0); + vector signed int prod1 = vec_mulo(q2xmins0, q8ysums0); + + vsumf0 = 
vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char q2x00 = vec_and(qxs0, lowMask); + vector signed char q2x01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char q2x02 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char q2x03 = vec_and(vec_sr(qxs0, v6), lowMask); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y01 = vec_xl( 16, y[i].qs); + vector signed char q8y02 = vec_xl( 32, y[i].qs); + vector signed char q8y03 = vec_xl( 48, y[i].qs); + + vector signed short qv0 = vec_add(vec_mule(q2x00, q8y00), vec_mulo(q2x00, q8y00)); + vector signed short qv1 = vec_add(vec_mule(q2x01, q8y01), vec_mulo(q2x01, q8y01)); + vector signed short qv2 = vec_add(vec_mule(q2x02, q8y02), vec_mulo(q2x02, q8y02)); + vector signed short qv3 = vec_add(vec_mule(q2x03, q8y03), vec_mulo(q2x03, q8y03)); + + vector signed short vscales_h = vec_unpackh(vscales); + vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed short vs3 = vec_splat(vscales_h, 3); + + vector signed int vsumi0 = vec_add(vec_mule(qv0, vs0), vec_mulo(qv0, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv1, vs1), vec_mulo(qv1, vs1)); + vector signed int vsumi2 = vec_add(vec_mule(qv2, vs2), vec_mulo(qv2, vs2)); + vector signed int vsumi3 = vec_add(vec_mule(qv3, vs3), vec_mulo(qv3, vs3)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else float sumf = 0; @@ -5841,6 +6344,160 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char v1 = vec_splats((signed char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x20); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + uint32_t aux[3]; + uint32_t utmp[4]; + + memcpy(aux, x[i].scales, 12); + utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4); + utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4); + utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4); + utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4); + + vector signed char vscales = (vector signed char)vec_xl( 0, utmp); + vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].hmask); + vector signed char qxhs1 = (vector signed 
char)vec_xl(16, x[i].hmask); + + vscales = vec_sub(vscales, off); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q3 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/128; ++j) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q3); + vector signed char qxs1 = (vector signed char)vec_xl(16, q3); + q3 += 32; + + //the low 2 bits + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char qxs02 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char qxs03 = vec_and(vec_sr(qxs0, v6), lowMask); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_and(vec_sr(qxs1, v2), lowMask); + vector signed char qxs12 = vec_and(vec_sr(qxs1, v4), lowMask); + vector signed char qxs13 = vec_and(vec_sr(qxs1, v6), lowMask); + + //the 3rd bit + vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); + vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, (vector unsigned char)v1)), v2); + vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); + vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v3)), v2); + vector signed char qxh10 = vec_sl(vec_andc(v1, qxhs1), v2); + vector signed char qxh11 = vec_sl(vec_andc(v1, vec_sr(qxhs1, (vector unsigned char)v1)), v2); + vector signed char qxh12 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v2)), v2); + vector signed char qxh13 = vec_sl(vec_andc(v1, vec_sr(qxhs1, v3)), v2); + qxhs0 = vec_sr(qxhs0, v4); + qxhs1 = vec_sr(qxhs1, v4); + + vector signed char q3x00 = vec_sub(qxs00, qxh00); + vector signed char q3x01 = vec_sub(qxs01, qxh01); + vector signed char q3x02 = vec_sub(qxs02, qxh02); + vector signed char q3x03 = vec_sub(qxs03, qxh03); + vector signed char q3x10 = vec_sub(qxs10, qxh10); + vector signed char q3x11 = vec_sub(qxs11, qxh11); + vector signed char q3x12 = vec_sub(qxs12, qxh12); + vector signed char q3x13 = vec_sub(qxs13, qxh13); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y01 = vec_xl( 32, q8); + vector signed char q8y11 = vec_xl( 48, q8); + vector signed char q8y02 = vec_xl( 64, q8); + vector signed char q8y12 = vec_xl( 80, q8); + vector signed char q8y03 = vec_xl( 96, q8); + vector signed char q8y13 = vec_xl(112, q8); + q8 += 128; + + vector signed short vscales_h = vec_unpackh(vscales); + vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed short vs3 = vec_splat(vscales_h, 3); + vector signed short vs4 = vec_splat(vscales_h, 4); + vector signed short vs5 = vec_splat(vscales_h, 5); + vector signed short vs6 = vec_splat(vscales_h, 6); + vector signed short vs7 = vec_splat(vscales_h, 7); + vscales = vec_sld(vscales, vscales, 8); + + vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); + vector signed short qv02 = 
vec_add(vec_mule(q3x02, q8y02), vec_mulo(q3x02, q8y02)); + vector signed short qv03 = vec_add(vec_mule(q3x03, q8y03), vec_mulo(q3x03, q8y03)); + vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); + vector signed short qv12 = vec_add(vec_mule(q3x12, q8y12), vec_mulo(q3x12, q8y12)); + vector signed short qv13 = vec_add(vec_mule(q3x13, q8y13), vec_mulo(q3x13, q8y13)); + + vector signed int vsum0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsum1 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2)); + vector signed int vsum2 = vec_add(vec_mule(qv02, vs4), vec_mulo(qv02, vs4)); + vector signed int vsum3 = vec_add(vec_mule(qv03, vs6), vec_mulo(qv03, vs6)); + vector signed int vsum4 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1)); + vector signed int vsum5 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3)); + vector signed int vsum6 = vec_add(vec_mule(qv12, vs5), vec_mulo(qv12, vs5)); + vector signed int vsum7 = vec_add(vec_mule(qv13, vs7), vec_mulo(qv13, vs7)); + + vsumi0 = vec_add(vsum0, vsumi0); + vsumi1 = vec_add(vsum1, vsumi1); + vsumi2 = vec_add(vsum2, vsumi2); + vsumi3 = vec_add(vsum3, vsumi3); + vsumi4 = vec_add(vsum4, vsumi4); + vsumi5 = vec_add(vsum5, vsumi5); + vsumi6 = vec_add(vsum6, vsumi6); + vsumi7 = vec_add(vsum7, vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else // scalar version // This function is written like this so the compiler can manage to vectorize most of it @@ -6207,6 +6864,95 @@ void ggml_vec_dot_q3_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0x3); + const vector signed char v1 = vec_splats((signed char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x8); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + uint16_t aux16[2]; + int8_t * scales = (int8_t *)aux16; + + const uint16_t a = *(const uint16_t *)x[i].scales; + aux16[0] = a & 0x0f0f; + aux16[1] = (a >> 4) & 0x0f0f; + + vector signed char vscales = (vector signed char)vec_xl_len(scales, 8); + vector signed char qxhs0 = (vector signed char)vec_xl_len(x[i].hmask, 8); + qxhs0 = vec_or(qxhs0, vec_sr(vec_sld(qxhs0, qxhs0, 8), (vector unsigned char)v1)); + + vscales = 
vec_sub(vscales, off); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_and(vec_sr(qxs0, v2), lowMask); + vector signed char qxs10 = vec_and(vec_sr(qxs0, v4), lowMask); + vector signed char qxs11 = vec_and(vec_sr(qxs0, v6), lowMask); + + //the 3rd bit + vector signed char qxh00 = vec_sl(vec_andc(v1, qxhs0), v2); + vector signed char qxh01 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v2)), v2); + vector signed char qxh02 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v4)), v2); + vector signed char qxh03 = vec_sl(vec_andc(v1, vec_sr(qxhs0, v6)), v2); + qxhs0 = vec_sr(qxhs0, v4); + + vector signed char q3x00 = vec_sub(qxs00, qxh00); + vector signed char q3x01 = vec_sub(qxs01, qxh01); + vector signed char q3x10 = vec_sub(qxs10, qxh02); + vector signed char q3x11 = vec_sub(qxs11, qxh03); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y01 = vec_xl( 16, y[i].qs); + vector signed char q8y10 = vec_xl( 32, y[i].qs); + vector signed char q8y11 = vec_xl( 48, y[i].qs); + + vector signed short vscales_h = vec_unpackh(vscales); + vector signed short vs0 = vec_splat(vscales_h, 0); + vector signed short vs1 = vec_splat(vscales_h, 1); + vector signed short vs2 = vec_splat(vscales_h, 2); + vector signed short vs3 = vec_splat(vscales_h, 3); + + vector signed short qv00 = vec_add(vec_mule(q3x00, q8y00), vec_mulo(q3x00, q8y00)); + vector signed short qv10 = vec_add(vec_mule(q3x10, q8y10), vec_mulo(q3x10, q8y10)); + vector signed short qv01 = vec_add(vec_mule(q3x01, q8y01), vec_mulo(q3x01, q8y01)); + vector signed short qv11 = vec_add(vec_mule(q3x11, q8y11), vec_mulo(q3x11, q8y11)); + + vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1)); + vector signed int vsumi2 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2)); + vector signed int vsumi3 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else int8_t aux8[QK_K]; @@ -6559,6 +7305,142 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + memcpy(utmp, x[i].scales, 12); + + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = 
(utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + vector signed char utmps = (vector signed char)vec_xl( 0, utmp); + vector signed short vscales = vec_unpackh(utmps); + vector signed short q4xmins = vec_unpackl(utmps); + vector signed short q4xmins0 = vec_mergeh(q4xmins, q4xmins); + vector signed short q4xmins1 = vec_mergel(q4xmins, q4xmins); + + vector signed int prod0 = vec_mule(q4xmins0, q8ysums0); + vector signed int prod1 = vec_mule(q4xmins1, q8ysums1); + vector signed int prod2 = vec_mulo(q4xmins0, q8ysums0); + vector signed int prod3 = vec_mulo(q4xmins1, q8ysums1); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); + vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q4 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/64; j+=2) { + __builtin_prefetch(q4, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); + vector signed char qxs1 = (vector signed char)vec_xl(16, q4); + vector signed char qxs2 = (vector signed char)vec_xl(32, q4); + vector signed char qxs3 = (vector signed char)vec_xl(48, q4); + q4 += 64; + + vector signed char q4x00 = vec_and(qxs0, lowMask); + vector signed char q4x01 = vec_sr(qxs0, v4); + vector signed char q4x10 = vec_and(qxs1, lowMask); + vector signed char q4x11 = vec_sr(qxs1, v4); + vector signed char q4x20 = vec_and(qxs2, lowMask); + vector signed char q4x21 = vec_sr(qxs2, v4); + vector signed char q4x30 = vec_and(qxs3, lowMask); + vector signed char q4x31 = vec_sr(qxs3, v4); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y01 = vec_xl( 32, q8); + vector signed char q8y11 = vec_xl( 48, q8); + vector signed char q8y20 = vec_xl( 64, q8); + vector signed char q8y30 = vec_xl( 80, q8); + vector signed char q8y21 = vec_xl( 96, q8); + vector signed char q8y31 = vec_xl(112, q8); + q8 += 128; + + vector signed short qv00 = vec_add(vec_mule(q4x00, q8y00), vec_mulo(q4x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q4x01, q8y01), vec_mulo(q4x01, q8y01)); + vector signed short qv10 = vec_add(vec_mule(q4x10, q8y10), vec_mulo(q4x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q4x11, q8y11), vec_mulo(q4x11, q8y11)); + vector signed short qv20 = vec_add(vec_mule(q4x20, q8y20), vec_mulo(q4x20, q8y20)); + vector signed short qv21 = vec_add(vec_mule(q4x21, q8y21), vec_mulo(q4x21, q8y21)); + vector signed short qv30 = vec_add(vec_mule(q4x30, q8y30), vec_mulo(q4x30, q8y30)); + vector signed short qv31 = vec_add(vec_mule(q4x31, q8y31), vec_mulo(q4x31, q8y31)); + + vector signed short vs0 = vec_splat(vscales, 0); + vector signed short vs1 = vec_splat(vscales, 1); + vector signed short vs2 = vec_splat(vscales, 2); + vector signed short vs3 = vec_splat(vscales, 3); + vscales = vec_sld(vscales, vscales, 8); + + qv00 = vec_add(qv00, qv10); + qv10 = vec_add(qv01, qv11); + qv20 = vec_add(qv20, qv30); + 
qv30 = vec_add(qv21, qv31); + + vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0); + vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1); + vsumi2 = vec_add(vec_mule(qv10, vs1), vsumi2); + vsumi3 = vec_add(vec_mulo(qv10, vs1), vsumi3); + vsumi4 = vec_add(vec_mule(qv20, vs2), vsumi4); + vsumi5 = vec_add(vec_mulo(qv20, vs2), vsumi5); + vsumi6 = vec_add(vec_mule(qv30, vs3), vsumi6); + vsumi7 = vec_add(vec_mulo(qv30, vs3), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else @@ -6825,6 +7707,87 @@ void ggml_vec_dot_q4_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d[1])); + vector float vyd = vec_splats(y[i].d); + vector float vd= vec_mul(vxd, vyd); + + uint16_t s16[2]; + const uint8_t * scales = (const uint8_t *)s16; + + const uint16_t * restrict b = (const uint16_t *)x[i].scales; + s16[0] = b[0] & 0x0f0f; + s16[1] = (b[0] >> 4) & 0x0f0f; + + vector signed char utmps = (vector signed char)vec_xl_len(scales, 4); + vector signed short vscales = (vector signed short)vec_unpackh(utmps); + vector signed short q4xmins0 = vec_mergeh(vscales, vscales); + q4xmins0 = vec_sld(q4xmins0, q4xmins0, 8); + + vector signed short q8ysums0 = vec_xl_len((const int16_t *)(y[i].bsums), 8); + + vector signed int prod0 = vec_mule(q4xmins0, q8ysums0); + vector signed int prod1 = vec_mulo(q4xmins0, q8ysums0); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vd, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vd, vsumf1); + + vd = vec_mul(vyd, vec_splats(GGML_FP16_TO_FP32(x[i].d[0]))); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char qxs1 = (vector signed char)vec_xl(16, x[i].qs); + vector signed char q4x00 = vec_and(qxs0, lowMask); + vector signed char q4x01 = vec_sr(qxs0, v4); + vector signed char q4x10 = vec_and(qxs1, lowMask); + vector signed char q4x11 = vec_sr(qxs1, v4); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y10 = vec_xl(16, y[i].qs); + vector signed char q8y01 = vec_xl(32, y[i].qs); + vector signed char q8y11 = vec_xl(48, y[i].qs); + + vector signed short qv00 = vec_add(vec_mule(q4x00, q8y00), vec_mulo(q4x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q4x01, q8y01), vec_mulo(q4x01, q8y01)); + vector signed short qv10 = vec_add(vec_mule(q4x10, q8y10), vec_mulo(q4x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q4x11, q8y11), vec_mulo(q4x11, q8y11)); + + vector signed short vs0 = vec_splat(vscales, 0); + vector 
signed short vs1 = vec_splat(vscales, 1); + + vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv10, vs0), vec_mulo(qv10, vs0)); + vector signed int vsumi2 = vec_add(vec_mule(qv01, vs1), vec_mulo(qv01, vs1)); + vector signed int vsumi3 = vec_add(vec_mule(qv11, vs1), vec_mulo(qv11, vs1)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else uint8_t aux8[QK_K]; @@ -7226,6 +8189,130 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf+sums; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v1 = vec_splats((unsigned char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector float vxmin = vec_splats(GGML_FP16_TO_FP32(x[i].dmin)); + vector float vdmin = vec_mul(vxmin, vyd); + + memcpy(utmp, x[i].scales, 12); + + utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4); + const uint32_t uaux = utmp[1] & kmask1; + utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4); + utmp[2] = uaux; + utmp[0] &= kmask1; + + vector signed short q8ysums0 = vec_xl( 0, y[i].bsums); + vector signed short q8ysums1 = vec_xl(16, y[i].bsums); + + vector signed char utmps = (vector signed char)vec_xl( 0, utmp); + vector signed short vscales = vec_unpackh(utmps); + + vector signed short q5xmins = vec_unpackl(utmps); + vector signed short q5xmins0 = vec_mergeh(q5xmins, q5xmins); + vector signed short q5xmins1 = vec_mergel(q5xmins, q5xmins); + + vector signed int prod0 = vec_mule(q5xmins0, q8ysums0); + vector signed int prod1 = vec_mule(q5xmins1, q8ysums1); + vector signed int prod2 = vec_mulo(q5xmins0, q8ysums0); + vector signed int prod3 = vec_mulo(q5xmins1, q8ysums1); + + vsumf0 = vec_nmsub(vec_ctf(prod0, 0), vdmin, vsumf0); + vsumf1 = vec_nmsub(vec_ctf(prod1, 0), vdmin, vsumf1); + vsumf2 = vec_nmsub(vec_ctf(prod2, 0), vdmin, vsumf2); + vsumf3 = vec_nmsub(vec_ctf(prod3, 0), vdmin, vsumf3); + + vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh); + vector signed char qxhs1 = (vector signed char)vec_xl(16, x[i].qh); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + + const uint8_t * restrict q5 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/64; ++j) { + __builtin_prefetch(q5, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, 
q5); + vector signed char qxs1 = (vector signed char)vec_xl(16, q5); + q5 += 32; + + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_sr(qxs0, v4); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_sr(qxs1, v4); + + vector signed char q5h00 = vec_sl(vec_and((vector signed char)v1, qxhs0), v4); + vector signed char q5h01 = vec_sl(vec_and((vector signed char)v2, qxhs0), v3); + vector signed char q5h10 = vec_sl(vec_and((vector signed char)v1, qxhs1), v4); + vector signed char q5h11 = vec_sl(vec_and((vector signed char)v2, qxhs1), v3); + qxhs0 = vec_sr(qxhs0, v2); + qxhs1 = vec_sr(qxhs1, v2); + + vector signed char q5x00 = vec_or(q5h00, qxs00); + vector signed char q5x01 = vec_or(q5h01, qxs01); + vector signed char q5x10 = vec_or(q5h10, qxs10); + vector signed char q5x11 = vec_or(q5h11, qxs11); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl(16, q8); + vector signed char q8y01 = vec_xl(32, q8); + vector signed char q8y11 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv00 = vec_add(vec_mule(q5x00, q8y00), vec_mulo(q5x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q5x01, q8y01), vec_mulo(q5x01, q8y01)); + vector signed short qv10 = vec_add(vec_mule(q5x10, q8y10), vec_mulo(q5x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q5x11, q8y11), vec_mulo(q5x11, q8y11)); + + vector signed short vs0 = vec_splat(vscales, 0); + vector signed short vs1 = vec_splat(vscales, 1); + vscales = vec_sld(vscales, vscales, 12); + + qv00 = vec_add(qv00, qv10); + qv01 = vec_add(qv01, qv11); + + vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0); + vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1); + vsumi2 = vec_add(vec_mule(qv01, vs1), vsumi2); + vsumi3 = vec_add(vec_mulo(qv01, vs1), vsumi3); + } + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else const uint8_t * scales = (const uint8_t*)&utmp[0]; @@ -7523,6 +8610,83 @@ void ggml_vec_dot_q5_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v1 = vec_splats((unsigned char)0x1); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].qs, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd= vec_mul(vxd, vyd); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].qs); + vector signed char qxs1 = (vector signed char)vec_xl(16, x[i].qs); + vector signed char qxs00 = (vector signed char)vec_and(qxs0, lowMask); + vector signed char qxs01 = (vector signed char)vec_sr(qxs0, v4); + vector signed char qxs10 = (vector signed 
char)vec_and(qxs1, lowMask); + vector signed char qxs11 = (vector signed char)vec_sr(qxs1, v4); + + vector signed char qxhs = (vector signed char)vec_xl_len(x[i].qh, 8); + vector signed char qxhs0 = vec_or(qxhs, vec_sr(vec_sld(qxhs, qxhs, 8), v1)); + vector signed char qxhs1 = vec_sr(qxhs0, v2); + vector signed char qxh00 = vec_sl(vec_andc((vector signed char)v1, qxhs0), v4); + vector signed char qxh10 = vec_sl(vec_andc((vector signed char)v1, qxhs1), v4); + vector signed char qxh01 = vec_sl(vec_andc((vector signed char)v1, vec_sr(qxhs0, v4)), v4); + vector signed char qxh11 = vec_sl(vec_andc((vector signed char)v1, vec_sr(qxhs1, v4)), v4); + + vector signed char q5x00 = vec_sub(qxs00, qxh00); + vector signed char q5x10 = vec_sub(qxs10, qxh10); + vector signed char q5x01 = vec_sub(qxs01, qxh01); + vector signed char q5x11 = vec_sub(qxs11, qxh11); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y10 = vec_xl(16, y[i].qs); + vector signed char q8y01 = vec_xl(32, y[i].qs); + vector signed char q8y11 = vec_xl(48, y[i].qs); + + vector signed short qv00 = vec_add(vec_mule(q5x00, q8y00), vec_mulo(q5x00, q8y00)); + vector signed short qv01 = vec_add(vec_mule(q5x01, q8y01), vec_mulo(q5x01, q8y01)); + vector signed short qv10 = vec_add(vec_mule(q5x10, q8y10), vec_mulo(q5x10, q8y10)); + vector signed short qv11 = vec_add(vec_mule(q5x11, q8y11), vec_mulo(q5x11, q8y11)); + + vector signed short vs = (vector signed short)vec_unpackh(vec_xl_len(x[i].scales, 4)); + vector signed short vs0 = vec_splat(vs, 0); + vector signed short vs1 = vec_splat(vs, 1); + vector signed short vs2 = vec_splat(vs, 2); + vector signed short vs3 = vec_splat(vs, 3); + + vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1)); + vector signed int vsumi2 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2)); + vector signed int vsumi3 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else int8_t aux8[QK_K]; @@ -7953,6 +9117,151 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x20); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = 
vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q6 = x[i].ql; + const uint8_t * restrict qh = x[i].qh; + const int8_t * restrict qs = x[i].scales; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/128; ++j) { + __builtin_prefetch(q6, 0, 0); + __builtin_prefetch(qh, 0, 0); + __builtin_prefetch(q8, 0, 0); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q6); + vector signed char qxs1 = (vector signed char)vec_xl(16, q6); + vector signed char qxs2 = (vector signed char)vec_xl(32, q6); + vector signed char qxs3 = (vector signed char)vec_xl(48, q6); + q6 += 64; + + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_sr(qxs0, v4); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_sr(qxs1, v4); + vector signed char qxs20 = vec_and(qxs2, lowMask); + vector signed char qxs21 = vec_sr(qxs2, v4); + vector signed char qxs30 = vec_and(qxs3, lowMask); + vector signed char qxs31 = vec_sr(qxs3, v4); + + vector signed char qxhs0 = (vector signed char)vec_xl( 0, qh); + vector signed char qxhs1 = (vector signed char)vec_xl(16, qh); + qh += 32; + + vector signed char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4); + vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4); + vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, qxhs1), v4); + vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v4)), v4); + vector signed char qxh20 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4); + vector signed char qxh21 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4); + vector signed char qxh30 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v2)), v4); + vector signed char qxh31 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs1, v6)), v4); + + vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off); + vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off); + vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off); + vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off); + vector signed char q6x20 = vec_sub(vec_or(qxh20, qxs20), off); + vector signed char q6x21 = vec_sub(vec_or(qxh21, qxs21), off); + vector signed char q6x30 = vec_sub(vec_or(qxh30, qxs30), off); + vector signed char q6x31 = vec_sub(vec_or(qxh31, qxs31), off); + + vector signed char q8y00 = vec_xl( 0, q8); + vector signed char q8y10 = vec_xl( 16, q8); + vector signed char q8y20 = vec_xl( 32, q8); + vector signed char q8y30 = vec_xl( 48, q8); + vector signed char q8y01 = vec_xl( 64, q8); + vector signed char q8y11 = vec_xl( 80, q8); + vector signed char q8y21 = vec_xl( 96, q8); + vector signed char q8y31 = vec_xl(112, q8); + q8 += 128; + + vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00)); + vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10)); + vector signed short qv20 = vec_add(vec_mule(q6x20, q8y20), vec_mulo(q6x20, q8y20)); + vector signed short qv30 = vec_add(vec_mule(q6x30, q8y30), vec_mulo(q6x30, q8y30)); + vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01)); + vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11)); 
+ vector signed short qv21 = vec_add(vec_mule(q6x21, q8y21), vec_mulo(q6x21, q8y21)); + vector signed short qv31 = vec_add(vec_mule(q6x31, q8y31), vec_mulo(q6x31, q8y31)); + + vector signed short vscales = vec_unpackh(vec_xl_len(qs, 8)); + qs += 8; + + vector signed short vs0 = vec_splat(vscales, 0); + vector signed short vs1 = vec_splat(vscales, 1); + vector signed short vs2 = vec_splat(vscales, 2); + vector signed short vs3 = vec_splat(vscales, 3); + vector signed short vs4 = vec_splat(vscales, 4); + vector signed short vs5 = vec_splat(vscales, 5); + vector signed short vs6 = vec_splat(vscales, 6); + vector signed short vs7 = vec_splat(vscales, 7); + + vsumi0 = vec_add(vec_mule(qv00, vs0), vsumi0); + vsumi1 = vec_add(vec_mulo(qv00, vs0), vsumi1); + vsumi2 = vec_add(vec_mule(qv01, vs4), vsumi2); + vsumi3 = vec_add(vec_mulo(qv01, vs4), vsumi3); + vsumi4 = vec_add(vec_mule(qv10, vs1), vsumi4); + vsumi5 = vec_add(vec_mulo(qv10, vs1), vsumi5); + vsumi6 = vec_add(vec_mule(qv11, vs5), vsumi6); + vsumi7 = vec_add(vec_mulo(qv11, vs5), vsumi7); + + vsumi0 = vec_add(vec_mule(qv20, vs2), vsumi0); + vsumi1 = vec_add(vec_mulo(qv20, vs2), vsumi1); + vsumi2 = vec_add(vec_mule(qv21, vs6), vsumi2); + vsumi3 = vec_add(vec_mulo(qv21, vs6), vsumi3); + vsumi4 = vec_add(vec_mule(qv30, vs3), vsumi4); + vsumi5 = vec_add(vec_mulo(qv30, vs3), vsumi5); + vsumi6 = vec_add(vec_mule(qv31, vs7), vsumi6); + vsumi7 = vec_add(vec_mulo(qv31, vs7), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else int8_t aux8[QK_K]; @@ -8259,6 +9568,85 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r *s = sumf; +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v2 = vec_splats((unsigned char)0x2); + const vector unsigned char v3 = vec_splats((unsigned char)0x3); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + const vector unsigned char v6 = vec_splats((unsigned char)0x6); + const vector signed char off = vec_splats((signed char)0x20); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + +#pragma GCC unroll 2 + for (int i = 0; i < nb; ++i) { + __builtin_prefetch(x[i].ql, 0, 1); + __builtin_prefetch(x[i].qh, 0, 1); + __builtin_prefetch(y[i].qs, 0, 1); + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd= vec_mul(vxd, vyd); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, x[i].ql); + vector signed char qxs1 = (vector signed char)vec_xl(16, x[i].ql); + vector signed char qxs00 = vec_and(qxs0, lowMask); + vector signed char qxs01 = vec_sr(qxs0, v4); + vector signed char qxs10 = vec_and(qxs1, lowMask); + vector signed char qxs11 = vec_sr(qxs1, v4); + + vector signed char qxhs0 = (vector signed char)vec_xl( 0, x[i].qh); + + vector signed 
char qxh00 = vec_sl(vec_and((vector signed char)v3, qxhs0), v4); + vector signed char qxh01 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v4)), v4); + vector signed char qxh10 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v2)), v4); + vector signed char qxh11 = vec_sl(vec_and((vector signed char)v3, vec_sr(qxhs0, v6)), v4); + + vector signed char q6x00 = vec_sub(vec_or(qxh00, qxs00), off); + vector signed char q6x01 = vec_sub(vec_or(qxh01, qxs01), off); + vector signed char q6x10 = vec_sub(vec_or(qxh10, qxs10), off); + vector signed char q6x11 = vec_sub(vec_or(qxh11, qxs11), off); + + vector signed char q8y00 = vec_xl( 0, y[i].qs); + vector signed char q8y10 = vec_xl(16, y[i].qs); + vector signed char q8y01 = vec_xl(32, y[i].qs); + vector signed char q8y11 = vec_xl(48, y[i].qs); + + vector signed short qv00 = vec_add(vec_mule(q6x00, q8y00), vec_mulo(q6x00, q8y00)); + vector signed short qv10 = vec_add(vec_mule(q6x10, q8y10), vec_mulo(q6x10, q8y10)); + vector signed short qv01 = vec_add(vec_mule(q6x01, q8y01), vec_mulo(q6x01, q8y01)); + vector signed short qv11 = vec_add(vec_mule(q6x11, q8y11), vec_mulo(q6x11, q8y11)); + + vector signed short vs = (vector signed short)vec_unpackh(vec_xl_len(x[i].scales, 4)); + vector signed short vs0 = vec_splat(vs, 0); + vector signed short vs1 = vec_splat(vs, 1); + vector signed short vs2 = vec_splat(vs, 2); + vector signed short vs3 = vec_splat(vs, 3); + + vector signed int vsumi0 = vec_add(vec_mule(qv00, vs0), vec_mulo(qv00, vs0)); + vector signed int vsumi1 = vec_add(vec_mule(qv10, vs1), vec_mulo(qv10, vs1)); + vector signed int vsumi2 = vec_add(vec_mule(qv01, vs2), vec_mulo(qv01, vs2)); + vector signed int vsumi3 = vec_add(vec_mule(qv11, vs3), vec_mulo(qv11, vs3)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); + #else int8_t aux8[QK_K]; @@ -8300,7 +9688,7 @@ void ggml_vec_dot_q6_K_q8_K(int n, float * restrict s, size_t bs, const void * r #endif -#if defined (__AVX2__) || defined (__ARM_NEON) +#if defined (__AVX2__) || defined (__ARM_NEON) || defined (__POWER9_VECTOR__) static const int8_t keven_signs_q2xs[1024] = { 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1, @@ -8433,6 +9821,103 @@ void ggml_vec_dot_iq2_xxs_q8_K(int n, float * restrict s, size_t bs, const void *s = 0.125f * hsum_float_8(accumf); +#elif defined(__POWER9_VECTOR__) + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector 
signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint16_t * restrict q2 = x[i].qs; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + uint32_t aux32[4]; + const uint8_t * aux8 = (const uint8_t *)aux32; + + memcpy(aux32, q2, 4*sizeof(uint32_t)); + q2 += 8; + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xxs_grid + aux8[ 0]), *(const int64_t *)(iq2xxs_grid + aux8[ 1])}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xxs_grid + aux8[ 2]), *(const int64_t *)(iq2xxs_grid + aux8[ 3])}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xxs_grid + aux8[ 8]), *(const int64_t *)(iq2xxs_grid + aux8[ 9])}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xxs_grid + aux8[10]), *(const int64_t *)(iq2xxs_grid + aux8[11])}; + + vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((aux32[1] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 7) & 127))}; + vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((aux32[1] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[1] >> 21) & 127))}; + vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((aux32[3] >> 0) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 7) & 127))}; + vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((aux32[3] >> 14) & 127)), *(const int64_t *)(signs64 + ((aux32[3] >> 21) & 127))}; + + vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); + vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); + vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); + vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = aux32[1] >> 28; + const uint16_t ls1 = aux32[3] >> 28; + + vector signed short vscales01 = vec_splats((int16_t)(2*ls0+1)); + vector signed short vscales23 = vec_splats((int16_t)(2*ls1+1)); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = 
vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); #else uint32_t aux32[2]; @@ -8708,6 +10193,104 @@ void ggml_vec_dot_iq2_xs_q8_K(int n, float * restrict s, size_t bs, const void * *s = 0.125f * hsum_float_8(accumf); #endif +#elif defined(__POWER9_VECTOR__) + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint16_t * restrict q2 = x[i].qs; + const uint8_t * restrict sc = x[i].scales; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/64; ++j) { + __builtin_prefetch(q2, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2xs_grid + (q2[0] & 511)), *(const int64_t *)(iq2xs_grid + (q2[1] & 511))}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2xs_grid + (q2[2] & 511)), *(const int64_t *)(iq2xs_grid + (q2[3] & 511))}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2xs_grid + (q2[4] & 511)), *(const int64_t *)(iq2xs_grid + (q2[5] & 511))}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2xs_grid + (q2[6] & 511)), *(const int64_t *)(iq2xs_grid + (q2[7] & 511))}; + + vector signed long long vsigns0 = {*(const int64_t *)(signs64 + ((q2[0] >> 9))), *(const int64_t *)(signs64 + ((q2[1] >> 9)))}; + vector signed long long vsigns1 = {*(const int64_t *)(signs64 + ((q2[2] >> 9))), *(const int64_t *)(signs64 + ((q2[3] >> 9)))}; + vector signed long long vsigns2 = {*(const int64_t *)(signs64 + ((q2[4] >> 9))), *(const int64_t *)(signs64 + ((q2[5] >> 9)))}; + vector signed long long vsigns3 = {*(const int64_t *)(signs64 + ((q2[6] >> 9))), *(const int64_t *)(signs64 + ((q2[7] >> 9)))}; + q2 += 8; + + vector signed char q2x0 = (vector signed char)vec_mul((vector signed char)vsigns0, (vector signed char)aux64x2_0); + vector signed char q2x1 = (vector signed char)vec_mul((vector signed char)vsigns1, (vector signed char)aux64x2_1); + vector signed char q2x2 = (vector signed char)vec_mul((vector signed char)vsigns2, (vector signed char)aux64x2_2); + vector signed char q2x3 = (vector signed char)vec_mul((vector signed char)vsigns3, (vector signed char)aux64x2_3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + 
vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); + const uint16_t ls1 = (uint16_t)(sc[0] >> 4); + const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); + const uint16_t ls3 = (uint16_t)(sc[1] >> 4); + sc += 2; + + vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); + vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); + vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); + vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); + + vsumi0 = vec_add(vec_mule(qv0, vscales0), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales1), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales2), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales3), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales0), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales1), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales2), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales3), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); #else float sumf = 0.f; @@ -8908,6 +10491,124 @@ void ggml_vec_dot_iq2_s_q8_K(int n, float * restrict s, size_t bs, const void * *s = 0.125f * hsum_float_8(accumf); +#elif defined(__POWER9_VECTOR__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const vector unsigned char mask0 = vec_xl( 0, k_mask1); + const vector unsigned char mask1 = vec_xl(16, k_mask1); + const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q2 = x[i].qs; + const uint8_t * restrict qh = x[i].qh; + const uint16_t * restrict signs = (const uint16_t *)(x[i].qs + QK_K/8); + const uint8_t * restrict sc = x[i].scales; + const int8_t * restrict q8 = y[i].qs; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q2, 0, 1); + 
__builtin_prefetch(q8, 0, 1); + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq2s_grid + (q2[0] | ((qh[0] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[1] | ((qh[0] << 6) & 0x300)))}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq2s_grid + (q2[2] | ((qh[0] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[3] | ((qh[0] << 2) & 0x300)))}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq2s_grid + (q2[4] | ((qh[1] << 8) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[5] | ((qh[1] << 6) & 0x300)))}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq2s_grid + (q2[6] | ((qh[1] << 4) & 0x300))), *(const int64_t *)(iq2s_grid + (q2[7] | ((qh[1] << 2) & 0x300)))}; + q2 += 8; + qh += 2; + + vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); + vector signed char vsigns23 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); + signs += 4; + + vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); + vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); + vector signed char vsigns2 = vec_perm(vsigns23, vsigns23, mask0); + vector signed char vsigns3 = vec_perm(vsigns23, vsigns23, mask1); + + vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); + vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); + vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); + vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); + + vector signed char q2x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux64x2_0), vsigns0); + vector signed char q2x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux64x2_1), vsigns1); + vector signed char q2x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux64x2_2), vsigns2); + vector signed char q2x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux64x2_3), vsigns3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q2x0, q8y0), vec_mulo(q2x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q2x1, q8y1), vec_mulo(q2x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q2x2, q8y2), vec_mulo(q2x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q2x3, q8y3), vec_mulo(q2x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); + const uint16_t ls1 = (uint16_t)(sc[0] >> 4); + const uint16_t ls2 = (uint16_t)(sc[1] & 0xf); + const uint16_t ls3 = (uint16_t)(sc[1] >> 4); + sc += 2; + + vector signed short vscales0 = vec_splats((int16_t)(2*ls0+1)); + vector signed short vscales1 = vec_splats((int16_t)(2*ls1+1)); + vector signed short vscales2 = vec_splats((int16_t)(2*ls2+1)); + vector signed short vscales3 = vec_splats((int16_t)(2*ls3+1)); + + vsumi0 = vec_add(vec_mule(qv0, vscales0), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales1), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales2), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales3), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales0), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales1), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales2), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales3), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 
0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.125f * vec_extract(vsumf0, 0); #else float sumf = 0; @@ -9052,6 +10753,101 @@ void ggml_vec_dot_iq3_xxs_q8_K(int n, float * restrict s, size_t bs, const void *s = 0.25f * hsum_float_8(accumf); +#elif defined(__POWER9_VECTOR__) + const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs; + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + const uint8_t * restrict q3 = x[i].qs; + const uint32_t * restrict signs = (const uint32_t *)(x[i].qs + QK_K/4); + const int8_t * restrict q8 = y[i].qs; + +#pragma GCC unroll 1 + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector unsigned int aux32x4_0 = {iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]}; + vector unsigned int aux32x4_1 = {iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]}; + vector unsigned int aux32x4_2 = {iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]}; + vector unsigned int aux32x4_3 = {iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]}; + q3 += 16; + + vector unsigned long long aux64x2_0 = {(uint64_t)(signs64[(signs[0] >> 0) & 127]), (uint64_t)(signs64[(signs[0] >> 7) & 127])}; + vector unsigned long long aux64x2_1 = {(uint64_t)(signs64[(signs[0] >> 14) & 127]), (uint64_t)(signs64[(signs[0] >> 21) & 127])}; + vector unsigned long long aux64x2_2 = {(uint64_t)(signs64[(signs[1] >> 0) & 127]), (uint64_t)(signs64[(signs[1] >> 7) & 127])}; + vector unsigned long long aux64x2_3 = {(uint64_t)(signs64[(signs[1] >> 14) & 127]), (uint64_t)(signs64[(signs[1] >> 21) & 127])}; + + vector signed char q3x0 = vec_mul((vector signed char)aux64x2_0, (vector signed char)aux32x4_0); + vector signed char q3x1 = vec_mul((vector signed char)aux64x2_1, (vector signed char)aux32x4_1); + vector signed char q3x2 = vec_mul((vector signed char)aux64x2_2, (vector signed char)aux32x4_2); + vector signed char q3x3 = vec_mul((vector signed char)aux64x2_3, (vector signed char)aux32x4_3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), 
vec_mulo(q3x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(signs[0] >> 28); + const uint16_t ls1 = (uint16_t)(signs[1] >> 28); + signs += 2; + + vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); + vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = 0.25f * vec_extract(vsumf0, 0); #else uint32_t aux32; @@ -9279,6 +11075,124 @@ void ggml_vec_dot_iq3_s_q8_K (int n, float * restrict s, size_t bs, const void * *s = hsum_float_8(accumf); +#elif defined(__POWER9_VECTOR__) + static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03 + }; + + static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,}; + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const vector unsigned char mask0 = vec_xl( 0, k_mask1); + const vector unsigned char mask1 = vec_xl(16, k_mask1); + const vector signed char mask2 = (vector signed char)vec_xl( 0, k_mask2); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + const uint8_t * restrict q3 = x[i].qs; + const uint8_t * restrict qh = x[i].qh; + const uint16_t * restrict signs = (const uint16_t *)(x[i].signs); + const uint8_t * restrict sc = x[i].scales; + const int8_t * restrict q8 = y[i].qs; + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q3, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector unsigned int aux32x4_0 = {iq3s_grid[q3[ 0] | ((qh[0] << 8) & 256)], iq3s_grid[q3[ 1] | ((qh[0] << 7) & 256)], + iq3s_grid[q3[ 2] | ((qh[0] << 6) & 256)], iq3s_grid[q3[ 3] | ((qh[0] << 5) & 256)]}; + 
vector unsigned int aux32x4_1 = {iq3s_grid[q3[ 4] | ((qh[0] << 4) & 256)], iq3s_grid[q3[ 5] | ((qh[0] << 3) & 256)], + iq3s_grid[q3[ 6] | ((qh[0] << 2) & 256)], iq3s_grid[q3[ 7] | ((qh[0] << 1) & 256)]}; + vector unsigned int aux32x4_2 = {iq3s_grid[q3[ 8] | ((qh[1] << 8) & 256)], iq3s_grid[q3[ 9] | ((qh[1] << 7) & 256)], + iq3s_grid[q3[10] | ((qh[1] << 6) & 256)], iq3s_grid[q3[11] | ((qh[1] << 5) & 256)]}; + vector unsigned int aux32x4_3 = {iq3s_grid[q3[12] | ((qh[1] << 4) & 256)], iq3s_grid[q3[13] | ((qh[1] << 3) & 256)], + iq3s_grid[q3[14] | ((qh[1] << 2) & 256)], iq3s_grid[q3[15] | ((qh[1] << 1) & 256)]}; + q3 += 16; + qh += 2; + + vector signed char vsigns01 = (vector signed char)vec_splats(*(const uint32_t *)&signs[0]); + vector signed char vsigns02 = (vector signed char)vec_splats(*(const uint32_t *)&signs[2]); + signs += 4; + + vector signed char vsigns0 = vec_perm(vsigns01, vsigns01, mask0); + vector signed char vsigns1 = vec_perm(vsigns01, vsigns01, mask1); + vector signed char vsigns2 = vec_perm(vsigns02, vsigns02, mask0); + vector signed char vsigns3 = vec_perm(vsigns02, vsigns02, mask1); + + vsigns0 = (vector signed char)vec_cmpeq(vec_and(vsigns0, mask2), mask2); + vsigns1 = (vector signed char)vec_cmpeq(vec_and(vsigns1, mask2), mask2); + vsigns2 = (vector signed char)vec_cmpeq(vec_and(vsigns2, mask2), mask2); + vsigns3 = (vector signed char)vec_cmpeq(vec_and(vsigns3, mask2), mask2); + + vector signed char q3x0 = vec_sub(vec_xor(vsigns0, (vector signed char)aux32x4_0), vsigns0); + vector signed char q3x1 = vec_sub(vec_xor(vsigns1, (vector signed char)aux32x4_1), vsigns1); + vector signed char q3x2 = vec_sub(vec_xor(vsigns2, (vector signed char)aux32x4_2), vsigns2); + vector signed char q3x3 = vec_sub(vec_xor(vsigns3, (vector signed char)aux32x4_3), vsigns3); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q3x0, q8y0), vec_mulo(q3x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q3x1, q8y1), vec_mulo(q3x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q3x2, q8y2), vec_mulo(q3x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q3x3, q8y3), vec_mulo(q3x3, q8y3)); + + const uint16_t ls0 = (uint16_t)(sc[0] & 0xf); + const uint16_t ls1 = (uint16_t)(sc[0] >> 4); + sc ++; + + vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); + vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, 
vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else float sumf = 0.f; @@ -9433,6 +11347,113 @@ void ggml_vec_dot_iq1_s_q8_K (int n, float * restrict s, size_t bs, const void *s = hsum_float_8(accum) + IQ1S_DELTA * accum1; +#elif defined(__POWER9_VECTOR__) + const vector unsigned char v0 = vec_splats((unsigned char)0x0); + const vector unsigned short vsign = vec_splats((unsigned short)0x8000); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + for (int i = 0; i < nb; ++i) { + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[i].d)); + vector float vyd = vec_splats(y[i].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + vector signed int vsumi8 = vec_splats((int32_t)0); + + const uint8_t * restrict q1 = x[i].qs; + const uint16_t * restrict qh = x[i].qh; + const int8_t * restrict q8 = y[i].qs; + const int16_t * restrict qs = y[i].bsums; + + for (int j = 0; j < QK_K/32; j += 2) { + __builtin_prefetch(q1, 0, 1); + __builtin_prefetch(qh, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed long long aux64x2_0 = {*(const int64_t *)(iq1s_grid + (q1[0] | ((qh[0] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[1] | ((qh[0] << 5) & 0x700)))}; + vector signed long long aux64x2_1 = {*(const int64_t *)(iq1s_grid + (q1[2] | ((qh[0] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[3] | ((qh[0] >> 1) & 0x700)))}; + vector signed long long aux64x2_2 = {*(const int64_t *)(iq1s_grid + (q1[4] | ((qh[1] << 8) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[5] | ((qh[1] << 5) & 0x700)))}; + vector signed long long aux64x2_3 = {*(const int64_t *)(iq1s_grid + (q1[6] | ((qh[1] << 2) & 0x700))), *(const int64_t *)(iq1s_grid + (q1[7] | ((qh[1] >> 1) & 0x700)))}; + q1 += 8; + + vector signed char q1x0 = (vector signed char)aux64x2_0; + vector signed char q1x1 = (vector signed char)aux64x2_1; + vector signed char q1x2 = (vector signed char)aux64x2_2; + vector signed char q1x3 = (vector signed char)aux64x2_3; + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q1x0, q8y0), vec_mulo(q1x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q1x1, q8y1), vec_mulo(q1x1, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q1x2, q8y2), vec_mulo(q1x2, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q1x3, q8y3), vec_mulo(q1x3, q8y3)); + + const uint16_t ls0 = (uint16_t)((qh[0] >> 12) & 7); + const uint16_t ls1 = (uint16_t)((qh[1] >> 12) & 7); + + vector signed short vscales01 = (vector signed short)vec_splats((uint16_t)(2*ls0+1)); + vector signed short vscales23 = (vector signed short)vec_splats((uint16_t)(2*ls1+1)); + vector signed short vscales = vec_sld(vscales23, vscales01, 8); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, 
vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + + vector signed short q8ysums = vec_xl_len(qs, 8); + qs += 4; + q8ysums = vec_mergeh(q8ysums, (vector signed short)v0); + + vector signed short qxh = (vector signed short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8); + qh += 2; + vector bool short vsel = vec_cmpge(qxh, (vector signed short)v0); + + vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel); + + vsumi8 = vec_add(vec_mule(q8ysum, vscales), vsumi8); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + + vsumf0 = vec_madd(vec_ctf(vsumi8, 0), vec_mul(vd, vec_splats(IQ1S_DELTA)), vsumf0); + } + + vsumf0 = vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else float sumf = 0; @@ -9789,6 +11810,51 @@ void ggml_vec_dot_iq4_nl_q8_0(int n, float * restrict s, size_t bs, const void * *s = hsum_float_8(_mm256_add_ps(accum1, accum2)); +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + + const vector signed char values = vec_xl( 0, kvalues_iq4nl); + +#pragma GCC unroll 4 + for (int ib = 0; ib < nb; ++ib) { + __builtin_prefetch(x[ib].qs, 0, 1); + __builtin_prefetch(y[ib].qs, 0, 1); + + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ib].d)); + vector float vyd = vec_splats(GGML_FP16_TO_FP32(y[ib].d)); + vector float vd = vec_mul(vxd, vyd); + + vector signed char qxs = (vector signed char)vec_xl( 0, x[ib].qs); + vector signed char q4x0 = vec_and(qxs, lowMask); + vector signed char q4x1 = vec_sr(qxs, v4); + + q4x0 = vec_perm(values, values, (vector unsigned char)q4x0); + q4x1 = vec_perm(values, values, (vector unsigned char)q4x1); + + vector signed char q8y0 = vec_xl( 0, y[ib].qs); + vector signed char q8y1 = vec_xl(16, y[ib].qs); + + vector signed short qv0 = vec_add(vec_mule(q4x0, q8y0), vec_mulo(q4x0, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x1, q8y1), vec_mulo(q4x1, q8y1)); + + vector signed int vsumi0 = vec_add(vec_unpackh(qv0), vec_unpackl(qv0)); + vector signed int vsumi1 = vec_add(vec_unpackh(qv1), vec_unpackl(qv1)); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + } + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else float sumf = 0; for (int ib = 0; ib < nb; ++ib) { @@ -9900,6 +11966,105 @@ void ggml_vec_dot_iq4_xs_q8_K(int n, float * restrict s, size_t bs, const void * *s = hsum_float_8(accum); +#elif defined(__POWER9_VECTOR__) + const vector signed char lowMask = 
vec_splats((signed char)0xF); + const vector unsigned char v4 = vec_splats((unsigned char)0x4); + + vector float vsumf0 = vec_splats(0.0f); + vector float vsumf1 = vec_splats(0.0f); + vector float vsumf2 = vec_splats(0.0f); + vector float vsumf3 = vec_splats(0.0f); + + const vector signed char values = vec_xl( 0, kvalues_iq4nl); + + for (int ibl = 0; ibl < nb; ++ibl) { + + vector float vxd = vec_splats(GGML_FP16_TO_FP32(x[ibl].d)); + vector float vyd = vec_splats(y[ibl].d); + vector float vd = vec_mul(vxd, vyd); + + vector signed int vsumi0 = vec_splats((int32_t)0); + vector signed int vsumi1 = vec_splats((int32_t)0); + vector signed int vsumi2 = vec_splats((int32_t)0); + vector signed int vsumi3 = vec_splats((int32_t)0); + vector signed int vsumi4 = vec_splats((int32_t)0); + vector signed int vsumi5 = vec_splats((int32_t)0); + vector signed int vsumi6 = vec_splats((int32_t)0); + vector signed int vsumi7 = vec_splats((int32_t)0); + + uint16_t h = x[ibl].scales_h; + + const uint8_t * restrict q4 = x[ibl].qs; + const uint8_t * restrict sc = x[ibl].scales_l; + const int8_t * restrict q8 = y[ibl].qs; + + for (int ib = 0; ib < QK_K/64; ib ++ ) { + __builtin_prefetch(q4, 0, 1); + __builtin_prefetch(q8, 0, 1); + + vector signed char qxs0 = (vector signed char)vec_xl( 0, q4); + vector signed char qxs1 = (vector signed char)vec_xl(16, q4); + q4 += 32; + + vector signed char q4x00 = (vector signed char)vec_and(qxs0, lowMask); + vector signed char q4x01 = (vector signed char)vec_sr(qxs0, v4); + vector signed char q4x10 = (vector signed char)vec_and(qxs1, lowMask); + vector signed char q4x11 = (vector signed char)vec_sr(qxs1, v4); + + q4x00 = vec_perm(values, values, (vector unsigned char)q4x00); + q4x01 = vec_perm(values, values, (vector unsigned char)q4x01); + q4x10 = vec_perm(values, values, (vector unsigned char)q4x10); + q4x11 = vec_perm(values, values, (vector unsigned char)q4x11); + + vector signed char q8y0 = vec_xl( 0, q8); + vector signed char q8y1 = vec_xl(16, q8); + vector signed char q8y2 = vec_xl(32, q8); + vector signed char q8y3 = vec_xl(48, q8); + q8 += 64; + + vector signed short qv0 = vec_add(vec_mule(q4x00, q8y0), vec_mulo(q4x00, q8y0)); + vector signed short qv1 = vec_add(vec_mule(q4x01, q8y1), vec_mulo(q4x01, q8y1)); + vector signed short qv2 = vec_add(vec_mule(q4x10, q8y2), vec_mulo(q4x10, q8y2)); + vector signed short qv3 = vec_add(vec_mule(q4x11, q8y3), vec_mulo(q4x11, q8y3)); + + const uint16_t ls0 = (uint16_t)(((sc[0] & 0xf) | ((h << 4) & 0x30)) - 32); + const uint16_t ls1 = (uint16_t)(((sc[0] >> 4) | ((h << 2) & 0x30)) - 32); + h >>= 4; + sc ++; + + vector signed short vscales01 = vec_splats((int16_t)ls0); + vector signed short vscales23 = vec_splats((int16_t)ls1); + + vsumi0 = vec_add(vec_mule(qv0, vscales01), vsumi0); + vsumi1 = vec_add(vec_mule(qv1, vscales01), vsumi1); + vsumi2 = vec_add(vec_mule(qv2, vscales23), vsumi2); + vsumi3 = vec_add(vec_mule(qv3, vscales23), vsumi3); + vsumi4 = vec_add(vec_mulo(qv0, vscales01), vsumi4); + vsumi5 = vec_add(vec_mulo(qv1, vscales01), vsumi5); + vsumi6 = vec_add(vec_mulo(qv2, vscales23), vsumi6); + vsumi7 = vec_add(vec_mulo(qv3, vscales23), vsumi7); + } + + vsumi0 = vec_add(vsumi0, vsumi4); + vsumi1 = vec_add(vsumi1, vsumi5); + vsumi2 = vec_add(vsumi2, vsumi6); + vsumi3 = vec_add(vsumi3, vsumi7); + + vsumf0 = vec_madd(vec_ctf(vsumi0, 0), vd, vsumf0); + vsumf1 = vec_madd(vec_ctf(vsumi1, 0), vd, vsumf1); + vsumf2 = vec_madd(vec_ctf(vsumi2, 0), vd, vsumf2); + vsumf3 = vec_madd(vec_ctf(vsumi3, 0), vd, vsumf3); + } + + vsumf0 = 
vec_add(vsumf0, vsumf2); + vsumf1 = vec_add(vsumf1, vsumf3); + + vsumf0 = vec_add(vsumf0, vsumf1); + + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 4)); + vsumf0 = vec_add(vsumf0, vec_sld(vsumf0, vsumf0, 8)); + + *s = vec_extract(vsumf0, 0); #else float sumf = 0; for (int ibl = 0; ibl < nb; ++ibl) { From 182adefcf36fc5f4263082ff032c0796fda65578 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Przemys=C5=82aw=20Pawe=C5=82czyk?= Date: Wed, 8 May 2024 17:33:43 +0200 Subject: [PATCH 17/51] ggml : expose SSE3 and SSSE3 for MSVC when AVX is available (whisper/2128) --- ggml-impl.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/ggml-impl.h b/ggml-impl.h index d85b152bf..59684fa81 100644 --- a/ggml-impl.h +++ b/ggml-impl.h @@ -120,9 +120,16 @@ extern "C" { #ifndef __F16C__ #define __F16C__ #endif +#endif + +// __SSE3__ and __SSSE3__ are not defined in MSVC, but SSE3/SSSE3 are present when AVX/AVX2/AVX512 are available +#if defined(_MSC_VER) && (defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__)) #ifndef __SSE3__ #define __SSE3__ #endif +#ifndef __SSSE3__ +#define __SSSE3__ +#endif #endif // 16-bit float From c3c88f296a72432edb697ac8026dbf2ec18f2b21 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Sun, 12 May 2024 20:36:31 +0300 Subject: [PATCH 18/51] ggml : try fix ppc64 (whisper/0) --- ggml-quants.c | 2 +- ggml.c | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/ggml-quants.c b/ggml-quants.c index f711bd013..9e62a3f32 100644 --- a/ggml-quants.c +++ b/ggml-quants.c @@ -11425,7 +11425,7 @@ void ggml_vec_dot_iq1_s_q8_K (int n, float * restrict s, size_t bs, const void vector signed short qxh = (vector signed short)vec_sld(vec_splats(qh[1]), vec_splats(qh[0]), 8); qh += 2; - vector bool short vsel = vec_cmpge(qxh, (vector signed short)v0); + vector __bool short vsel = vec_cmpge(qxh, (vector signed short)v0); vector signed short q8ysum = vec_sel((vector signed short)vec_xor((vector unsigned short)q8ysums, vsign), q8ysums, vsel); diff --git a/ggml.c b/ggml.c index b96a82a41..d443a9b42 100644 --- a/ggml.c +++ b/ggml.c @@ -1306,6 +1306,8 @@ static inline void __avx_f32cx8_store(ggml_fp16_t *x, __m256 y) { #define GGML_F16_VEC_ZERO GGML_F32x4_ZERO #define GGML_F16_VEC_SET1 GGML_F32x4_SET1 #define GGML_F16_VEC_FMA GGML_F32x4_FMA +#define GGML_F16_VEC_ADD GGML_F32x4_ADD +#define GGML_F16_VEC_MUL GGML_F32x4_MUL #define GGML_F16_VEC_REDUCE GGML_F32x4_REDUCE // Use vec_xl, not vec_ld, in case the load address is not aligned. #define GGML_F16_VEC_LOAD(p, i) (i & 0x1) ? 
\ From f308ea705974dff62a1fe5367d776ad9d5109239 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Mon, 13 May 2024 11:01:07 +0300 Subject: [PATCH 19/51] metal : tune soft_max number of threads (whisper/0) --- ggml-metal.m | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/ggml-metal.m b/ggml-metal.m index 28dec762a..bfa352c3a 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -1378,7 +1378,7 @@ static enum ggml_status ggml_metal_graph_compute( const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16); if (ne00%4 == 0) { - while (nth < ne00/4 && nth < 256) { + while (nth < ne00/4 && nth*ne01*ne02*ne03 < 256) { nth *= 2; } if (use_f16) { @@ -1387,7 +1387,7 @@ static enum ggml_status ggml_metal_graph_compute( pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SOFT_MAX_F32_4].pipeline; } } else { - while (nth < ne00 && nth < 1024) { + while (nth < ne00 && nth*ne01*ne02*ne03 < 256) { nth *= 2; } if (use_f16) { From a5e3fde8578d54b98d941344a4da150669af200d Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 14 May 2024 15:33:16 +0300 Subject: [PATCH 20/51] sync : ggml ggml-ci --- scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index 1ea320429..b2b5f8fc1 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -30f54cbb3ada3e4c5bc6924de3e5918e5be4ff11 +fafd5e7f89382b8cfb51e3dac8d4f1500ca44918 From e8a7fd4fb06d82f663850c21fcf86c0fb98ad9b4 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 14 May 2024 19:09:30 +0300 Subject: [PATCH 21/51] metal : support FA without mask + add asserts (#7278) * ggml : fa without mask + add asserts ggml-ci * metal : support non-contiguous KV ggml-ci --- ggml-metal.m | 69 +++++++++++++++++++++----------------- ggml-metal.metal | 53 +++++++++++------------------ ggml.c | 10 ++++++ ggml.h | 3 +- tests/test-backend-ops.cpp | 25 ++++++++------ 5 files changed, 85 insertions(+), 75 deletions(-) diff --git a/ggml-metal.m b/ggml-metal.m index bfa352c3a..390a1cd78 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -2512,13 +2512,14 @@ static enum ggml_status ggml_metal_graph_compute( } break; case GGML_OP_FLASH_ATTN_EXT: { - GGML_ASSERT(ne00 % 4 == 0); + GGML_ASSERT(ne00 % 4 == 0); + GGML_ASSERT(ne11 % 32 == 0); + GGML_ASSERT(src0->type == GGML_TYPE_F32); - struct ggml_tensor * src3 = gf->nodes[i]->src[3]; + GGML_ASSERT(ggml_are_same_shape (src1, src2)); - GGML_ASSERT(ggml_are_same_shape(src1, src2)); - GGML_ASSERT(src3); + struct ggml_tensor * src3 = gf->nodes[i]->src[3]; size_t offs_src3 = 0; @@ -2528,6 +2529,11 @@ static enum ggml_status ggml_metal_graph_compute( GGML_ASSERT(!src3 || src3->ne[1] >= GGML_PAD(src0->ne[1], 8) && "the Flash-Attention Metal kernel requires the mask to be padded to 8 and at least n_queries big"); + const uint64_t nb20 = src2 ? src2->nb[0] : 0; GGML_UNUSED(nb20); + const uint64_t nb21 = src2 ? src2->nb[1] : 0; + const uint64_t nb22 = src2 ? src2->nb[2] : 0; + const uint64_t nb23 = src2 ? src2->nb[3] : 0; + const int64_t ne30 = src3 ? src3->ne[0] : 0; GGML_UNUSED(ne30); //const int64_t ne31 = src3 ? src3->ne[1] : 0; const int64_t ne32 = src3 ? 
src3->ne[2] : 0; GGML_UNUSED(ne32); @@ -2590,34 +2596,35 @@ static enum ggml_status ggml_metal_graph_compute( [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0]; [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1]; [encoder setBuffer:id_src2 offset:offs_src2 atIndex:2]; - [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3]; + if (id_src3) { + [encoder setBuffer:id_src3 offset:offs_src3 atIndex:3]; + } else { + [encoder setBuffer:id_src0 offset:offs_src0 atIndex:3]; + } [encoder setBuffer:id_dst offset:offs_dst atIndex:4]; - [encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:5]; - [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:6]; - [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:7]; - [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:8]; - [encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:9]; - [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:10]; - [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:11]; - [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:12]; - [encoder setBytes:&ne10 length:sizeof( int64_t) atIndex:13]; - [encoder setBytes:&ne11 length:sizeof( int64_t) atIndex:14]; - [encoder setBytes:&ne12 length:sizeof( int64_t) atIndex:15]; - [encoder setBytes:&ne13 length:sizeof( int64_t) atIndex:16]; - [encoder setBytes:&nb10 length:sizeof(uint64_t) atIndex:17]; - [encoder setBytes:&nb11 length:sizeof(uint64_t) atIndex:18]; - [encoder setBytes:&nb12 length:sizeof(uint64_t) atIndex:19]; - [encoder setBytes:&nb13 length:sizeof(uint64_t) atIndex:20]; - [encoder setBytes:&nb31 length:sizeof(uint64_t) atIndex:21]; - [encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:22]; - [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:23]; - [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:24]; - [encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:25]; - [encoder setBytes:&scale length:sizeof( float) atIndex:26]; - [encoder setBytes:&max_bias length:sizeof( float) atIndex:27]; - [encoder setBytes:&m0 length:sizeof(m0) atIndex:28]; - [encoder setBytes:&m1 length:sizeof(m1) atIndex:29]; - [encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:30]; + [encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:5]; + [encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:6]; + [encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:7]; + [encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:8]; + [encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:9]; + [encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:10]; + [encoder setBytes:&ne11 length:sizeof( int64_t) atIndex:11]; + [encoder setBytes:&ne12 length:sizeof( int64_t) atIndex:12]; + [encoder setBytes:&ne13 length:sizeof( int64_t) atIndex:13]; + [encoder setBytes:&nb11 length:sizeof(uint64_t) atIndex:14]; + [encoder setBytes:&nb12 length:sizeof(uint64_t) atIndex:15]; + [encoder setBytes:&nb13 length:sizeof(uint64_t) atIndex:16]; + [encoder setBytes:&nb21 length:sizeof(uint64_t) atIndex:17]; + [encoder setBytes:&nb22 length:sizeof(uint64_t) atIndex:18]; + [encoder setBytes:&nb23 length:sizeof(uint64_t) atIndex:19]; + [encoder setBytes:&nb31 length:sizeof(uint64_t) atIndex:20]; + [encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:21]; + [encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:22]; + [encoder setBytes:&scale length:sizeof( float) atIndex:23]; + [encoder setBytes:&max_bias length:sizeof( float) atIndex:24]; + [encoder setBytes:&m0 length:sizeof(m0) atIndex:25]; + [encoder setBytes:&m1 length:sizeof(m1) atIndex:26]; + [encoder 
setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:27]; if (!use_vec_kernel) { // half8x8 kernel diff --git a/ggml-metal.metal b/ggml-metal.metal index 7af4e8f93..57fdf564e 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -2049,27 +2049,24 @@ typedef void (flash_attn_ext_f16_t)( device const char * v, device const char * mask, device float * dst, - constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, - constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, - constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, - constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, + constant uint64_t & nb21, + constant uint64_t & nb22, + constant uint64_t & nb23, constant uint64_t & nb31, - constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, - constant int64_t & ne3, constant float & scale, constant float & max_bias, constant float & m0, @@ -2090,27 +2087,24 @@ kernel void kernel_flash_attn_ext_f16( device const char * v, device const char * mask, device float * dst, - constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, - constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, - constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, - constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, + constant uint64_t & nb21, + constant uint64_t & nb22, + constant uint64_t & nb23, constant uint64_t & nb31, - constant int64_t & ne0, constant int64_t & ne1, constant int64_t & ne2, - constant int64_t & ne3, constant float & scale, constant float & max_bias, constant float & m0, @@ -2180,10 +2174,6 @@ kernel void kernel_flash_attn_ext_f16( const short ne22 = ne12; const short ne23 = ne13; - const uint nb21 = nb11; - const uint nb22 = nb12; - const uint nb23 = nb13; - // broadcast const short rk2 = ne02/ne12; const short rk3 = ne03/ne13; @@ -2247,11 +2237,16 @@ kernel void kernel_flash_attn_ext_f16( simdgroup_multiply_accumulate(mqk, mq[i], mk, mqk); } - // mqk = mqk*scale + mask*slope - simdgroup_half8x8 mm; - simdgroup_load(mm, mp + ic + 8*cc, nb31/sizeof(half), 0, false); - simdgroup_multiply(mm, mslope, mm); - simdgroup_multiply_accumulate(mqk, mqk, mscale, mm); + if (mask != q) { + // mqk = mqk*scale + mask*slope + simdgroup_half8x8 mm; + simdgroup_load(mm, mp + ic + 8*cc, nb31/sizeof(half), 0, false); + simdgroup_multiply(mm, mslope, mm); + simdgroup_multiply_accumulate(mqk, mqk, mscale, mm); + } else { + // mqk = mqk*scale + simdgroup_multiply(mqk, mscale, mqk); + } simdgroup_store(mqk, ss + 8*cc, TF, 0, false); } @@ -2425,27 +2420,24 @@ kernel void kernel_flash_attn_ext_vec_f16( device const char * v, device const char * mask, device float * dst, - constant int64_t & ne00, constant int64_t & ne01, constant int64_t & ne02, constant int64_t & ne03, - constant uint64_t & nb00, constant uint64_t & nb01, constant uint64_t & nb02, constant uint64_t & nb03, - constant int64_t & ne10, constant int64_t & ne11, constant int64_t & ne12, constant int64_t & ne13, - constant uint64_t & nb10, constant uint64_t & nb11, constant uint64_t & nb12, constant uint64_t & nb13, + constant uint64_t & nb21, + constant uint64_t & nb22, + constant uint64_t & nb23, constant uint64_t & nb31, - constant int64_t & ne0, constant int64_t 
& ne1, constant int64_t & ne2, - constant int64_t & ne3, constant float & scale, constant float & max_bias, constant float & m0, @@ -2521,10 +2513,6 @@ kernel void kernel_flash_attn_ext_vec_f16( const short ne22 = ne12; const short ne23 = ne13; - const uint nb21 = nb11; - const uint nb22 = nb12; - const uint nb23 = nb13; - // broadcast const short rk2 = ne02/ne12; const short rk3 = ne03/ne13; @@ -2589,8 +2577,7 @@ kernel void kernel_flash_attn_ext_vec_f16( // mqk = mqk*scale + mask*slope if (tiisg == 0) { - float4 mm = (float4) mp4[ic/4 + cc]; - mqk = mqk*scale + mm*slope; + mqk = mqk*scale + ((mask != q) ? ((float4) mp4[ic/4 + cc])*slope : (float4) 0.0f); ss4[cc] = mqk; } diff --git a/ggml.c b/ggml.c index d443a9b42..03b609ddd 100644 --- a/ggml.c +++ b/ggml.c @@ -2824,6 +2824,16 @@ bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor (t0->ne[3] == t1->ne[3] ); } +bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { + static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); + + return + (t0->nb[0] == t1->nb[0] ) && + (t0->nb[1] == t1->nb[1] ) && + (t0->nb[2] == t1->nb[2] ) && + (t0->nb[3] == t1->nb[3] ); +} + // check if t1 can be represented as a repeatition of t0 static inline bool ggml_can_repeat(const struct ggml_tensor * t0, const struct ggml_tensor * t1) { static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function"); diff --git a/ggml.h b/ggml.h index 3fe95ed57..25f4f73a8 100644 --- a/ggml.h +++ b/ggml.h @@ -766,7 +766,8 @@ extern "C" { GGML_API bool ggml_is_3d (const struct ggml_tensor * tensor); GGML_API int ggml_n_dims (const struct ggml_tensor * tensor); // returns 1 for scalars - GGML_API bool ggml_are_same_shape(const struct ggml_tensor * t0, const struct ggml_tensor * t1); + GGML_API bool ggml_are_same_shape (const struct ggml_tensor * t0, const struct ggml_tensor * t1); + GGML_API bool ggml_are_same_stride(const struct ggml_tensor * t0, const struct ggml_tensor * t1); // use this to compute the memory overhead of a tensor GGML_API size_t ggml_tensor_overhead(void); diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index 45a2cb85a..f080f7e22 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1487,25 +1487,27 @@ struct test_flash_attn_ext : public test_case { const int64_t kv; // kv size const int64_t nb; // batch size + const bool mask; // use mask + const float max_bias; // ALiBi std::string vars() override { - return VARS_TO_STR5(hs, nh, kv, nb, max_bias); + return VARS_TO_STR6(hs, nh, kv, nb, mask, max_bias); } double max_nmse_err() override { return 5e-4; } - test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8, float max_bias = 0.0f) - : hs(hs), nh(nh), kv(kv), nb(nb), max_bias(max_bias) {} + test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8, bool mask = true, float max_bias = 0.0f) + : hs(hs), nh(nh), kv(kv), nb(nb), mask(mask), max_bias(max_bias) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs, nb, nh, 1); ggml_tensor * k = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1); ggml_tensor * v = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1); - ggml_tensor * mask = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1); - ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, mask, 1.0f/sqrtf(hs), max_bias); + ggml_tensor * m = mask ? 
ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1) : nullptr; + ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hs), max_bias); return out; } }; @@ -2175,11 +2177,14 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op test_cases.emplace_back(new test_leaky_relu()); for (int hs : { 64, 80, 128, 256, }) { - for (float max_bias : {0.0f, 8.0f}) { - for (int nh : { 32, }) { - for (int kv : { 512, 1024, }) { - for (int nb : { 1, 2, 4, 8, }) { - test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, max_bias)); + for (bool mask : { true, false } ) { + for (float max_bias : { 0.0f, 8.0f }) { + if (!mask && max_bias > 0.0f) continue; + for (int nh : { 32, }) { + for (int kv : { 512, 1024, }) { + for (int nb : { 1, 2, 4, 8, }) { + test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, mask, max_bias)); + } } } } From 9f773486ab78d65f5cca3f7e31c862b7043bf721 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Tue, 14 May 2024 19:14:38 +0300 Subject: [PATCH 22/51] script : sync ggml-rpc --- scripts/sync-ggml-am.sh | 4 ++++ scripts/sync-ggml.sh | 2 ++ 2 files changed, 6 insertions(+) diff --git a/scripts/sync-ggml-am.sh b/scripts/sync-ggml-am.sh index 7c157eb4c..cf22afc41 100755 --- a/scripts/sync-ggml-am.sh +++ b/scripts/sync-ggml-am.sh @@ -112,6 +112,8 @@ if [ -f $SRC_LLAMA/ggml-src.patch ]; then # src/ggml-opencl.h -> ggml-opencl.h # src/ggml-quants.c -> ggml-quants.c # src/ggml-quants.h -> ggml-quants.h + # src/ggml-rpc.cpp -> ggml-rpc.cpp + # src/ggml-rpc.h -> ggml-rpc.h # src/ggml-sycl.cpp -> ggml-sycl.cpp # src/ggml-sycl.h -> ggml-sycl.h # src/ggml-vulkan.cpp -> ggml-vulkan.cpp @@ -149,6 +151,8 @@ if [ -f $SRC_LLAMA/ggml-src.patch ]; then -e 's/src\/ggml-opencl\.h/ggml-opencl.h/g' \ -e 's/src\/ggml-quants\.c/ggml-quants.c/g' \ -e 's/src\/ggml-quants\.h/ggml-quants.h/g' \ + -e 's/src\/ggml-rpc\.cpp/ggml-rpc.cpp/g' \ + -e 's/src\/ggml-rpc\.h/ggml-rpc.h/g' \ -e 's/src\/ggml-sycl\.cpp/ggml-sycl.cpp/g' \ -e 's/src\/ggml-sycl\.h/ggml-sycl.h/g' \ -e 's/src\/ggml-vulkan\.cpp/ggml-vulkan.cpp/g' \ diff --git a/scripts/sync-ggml.sh b/scripts/sync-ggml.sh index 80c43976f..ec47fb27c 100755 --- a/scripts/sync-ggml.sh +++ b/scripts/sync-ggml.sh @@ -20,6 +20,8 @@ cp -rpv ../ggml/src/ggml-opencl.cpp ./ggml-opencl.cpp cp -rpv ../ggml/src/ggml-opencl.h ./ggml-opencl.h cp -rpv ../ggml/src/ggml-quants.c ./ggml-quants.c cp -rpv ../ggml/src/ggml-quants.h ./ggml-quants.h +cp -rpv ../ggml/src/ggml-rpc.cpp ./ggml-rpc.cpp +cp -rpv ../ggml/src/ggml-rpc.h ./ggml-rpc.h cp -rpv ../ggml/src/ggml-sycl.cpp ./ggml-sycl.cpp cp -rpv ../ggml/src/ggml-sycl.h ./ggml-sycl.h cp -rpv ../ggml/src/ggml-vulkan.cpp ./ggml-vulkan.cpp From 583fd6b000ec9ad1b465b5c98524f4a0ae388077 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Wed, 15 May 2024 08:44:16 +0200 Subject: [PATCH 23/51] server bench: fix bench not waiting for model load (#7284) --- examples/server/bench/bench.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/examples/server/bench/bench.py b/examples/server/bench/bench.py index 86c5de101..25ac29c4c 100644 --- a/examples/server/bench/bench.py +++ b/examples/server/bench/bench.py @@ -293,13 +293,14 @@ def start_server_background(args): def is_server_listening(server_fqdn, server_port): - with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: - result = sock.connect_ex((server_fqdn, server_port)) - _is_server_listening = result == 0 - if 
_is_server_listening: - print(f"server is listening on {server_fqdn}:{server_port}...") - return _is_server_listening - + try: + url = f"{server_fqdn}:{server_port}/health" + if not url.startswith("http://"): + url = f"http://{url}" + result = requests.get(url) + return result.status_code == 200 + except Exception: + return False def escape_metric_name(metric_name): return re.sub('[^A-Z0-9]', '_', metric_name.upper()) From 48aa8fd1f213a69b41569f809cc954f24dbc4366 Mon Sep 17 00:00:00 2001 From: John Balis Date: Wed, 15 May 2024 03:52:33 -0500 Subject: [PATCH 24/51] ggml : add `ggml_upscale_ext` (ggml/814) * initial commit with CPU implementation of upscale to shape and test, cuda implementation next * experimental commit to see if dst shape is correct * test version * test * removed unnecessary params * refactor * fixed tests * ggml : metal impl + cleanup + sycl dev warnings * patched ggml_upscale cuda op to handle non-contiguous tensors, added test for non-contiguous behavior * metal : fix upsacle op to support nb00 + style --------- Co-authored-by: Georgi Gerganov --- ggml-cuda/upscale.cu | 63 +++++++++++++++++++------------------ ggml-metal.m | 10 ++++-- ggml-metal.metal | 21 ++++++++----- ggml-sycl.cpp | 4 +++ ggml.c | 64 ++++++++++++++++++++++++++++---------- ggml.h | 12 +++++++ tests/test-backend-ops.cpp | 32 +++++++++++++++++-- 7 files changed, 146 insertions(+), 60 deletions(-) diff --git a/ggml-cuda/upscale.cu b/ggml-cuda/upscale.cu index 2f62fed48..cf513c3ad 100644 --- a/ggml-cuda/upscale.cu +++ b/ggml-cuda/upscale.cu @@ -1,35 +1,36 @@ #include "upscale.cuh" -static __global__ void upscale_f32(const float * x, float * dst, const int ne00, const int ne00xne01, const int scale_factor) { - // blockIdx.z: idx of ne02*ne03 - // blockIdx.y: idx of ne01*scale_factor, aka ne1 - // blockIDx.x: idx of ne00*scale_factor / BLOCK_SIZE - // ne00xne01: ne00 * ne01 - int ne0 = ne00 * scale_factor; - int nidx = threadIdx.x + blockIdx.x * blockDim.x; - if (nidx >= ne0) { +static __global__ void upscale_f32(const float * x, float * dst, + const int nb00, const int nb01, const int nb02, const int nb03, + const int ne10, const int ne11, const int ne12, const int ne13, + const float sf0, const float sf1, const float sf2, const float sf3) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + if (index >= ne10 * ne11 * ne12 * ne13) { return; } - // operation - int i00 = nidx / scale_factor; - int i01 = blockIdx.y / scale_factor; - int offset_src = - i00 + - i01 * ne00 + - blockIdx.z * ne00xne01; - int offset_dst = - nidx + - blockIdx.y * ne0 + - blockIdx.z * ne0 * gridDim.y; - dst[offset_dst] = x[offset_src]; + + int i10 = index % ne10; + int i11 = (index / ne10) % ne11; + int i12 = (index / (ne10 * ne11)) % ne12; + int i13 = (index / (ne10 * ne11 * ne12)) % ne13; + + int i00 = i10 / sf0; + int i01 = i11 / sf1; + int i02 = i12 / sf2; + int i03 = i13 / sf3; + + dst[index] = *(float *)((char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00); } -static void upscale_f32_cuda(const float * x, float * dst, const int ne00, const int ne01, const int ne02, const int ne03, - const int scale_factor, cudaStream_t stream) { - int ne0 = (ne00 * scale_factor); - int num_blocks = (ne0 + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE; - dim3 gridDim(num_blocks, (ne01 * scale_factor), ne02*ne03); - upscale_f32<<>>(x, dst, ne00, ne00 * ne01, scale_factor); +static void upscale_f32_cuda(const float * x, float * dst, + const int nb00, const int nb01, const int nb02, const int nb03, + const int ne10, const 
int ne11, const int ne12, const int ne13, + const float sf0, const float sf1, const float sf2, const float sf3, + cudaStream_t stream) { + int dst_size = ne10 * ne11 * ne12 * ne13; + int num_blocks = (dst_size + CUDA_UPSCALE_BLOCK_SIZE - 1) / CUDA_UPSCALE_BLOCK_SIZE; + + upscale_f32<<>>(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3); } void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { @@ -39,10 +40,12 @@ void ggml_cuda_op_upscale(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { cudaStream_t stream = ctx.stream(); GGML_ASSERT(src0->type == GGML_TYPE_F32); - GGML_ASSERT(dst->type == GGML_TYPE_F32); - GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors + GGML_ASSERT( dst->type == GGML_TYPE_F32); - const int scale_factor = dst->op_params[0]; + const float sf0 = (float)dst->ne[0]/src0->ne[0]; + const float sf1 = (float)dst->ne[1]/src0->ne[1]; + const float sf2 = (float)dst->ne[2]/src0->ne[2]; + const float sf3 = (float)dst->ne[3]/src0->ne[3]; - upscale_f32_cuda(src0_d, dst_d, src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], scale_factor, stream); + upscale_f32_cuda(src0_d, dst_d, src0->nb[0], src0->nb[1], src0->nb[2], src0->nb[3], dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], sf0, sf1, sf2, sf3, stream); } diff --git a/ggml-metal.m b/ggml-metal.m index 390a1cd78..b0b16dbf7 100644 --- a/ggml-metal.m +++ b/ggml-metal.m @@ -2353,7 +2353,10 @@ static enum ggml_status ggml_metal_graph_compute( { GGML_ASSERT(src0->type == GGML_TYPE_F32); - const int sf = dst->op_params[0]; + const float sf0 = (float)ne0/src0->ne[0]; + const float sf1 = (float)ne1/src0->ne[1]; + const float sf2 = (float)ne2/src0->ne[2]; + const float sf3 = (float)ne3/src0->ne[3]; const id pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_UPSCALE_F32].pipeline; @@ -2376,7 +2379,10 @@ static enum ggml_status ggml_metal_graph_compute( [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:15]; [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:16]; [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:17]; - [encoder setBytes:&sf length:sizeof(sf) atIndex:18]; + [encoder setBytes:&sf0 length:sizeof(sf0) atIndex:18]; + [encoder setBytes:&sf1 length:sizeof(sf1) atIndex:19]; + [encoder setBytes:&sf2 length:sizeof(sf2) atIndex:20]; + [encoder setBytes:&sf3 length:sizeof(sf3) atIndex:21]; const int nth = MIN((int) pipeline.maxTotalThreadsPerThreadgroup, ne0); diff --git a/ggml-metal.metal b/ggml-metal.metal index 57fdf564e..386e9195f 100644 --- a/ggml-metal.metal +++ b/ggml-metal.metal @@ -1852,7 +1852,10 @@ kernel void kernel_upscale_f32( constant uint64_t & nb1, constant uint64_t & nb2, constant uint64_t & nb3, - constant int32_t & sf, + constant float & sf0, + constant float & sf1, + constant float & sf2, + constant float & sf3, uint3 tgpig[[threadgroup_position_in_grid]], uint3 tpitg[[thread_position_in_threadgroup]], uint3 ntg[[threads_per_threadgroup]]) { @@ -1861,15 +1864,17 @@ kernel void kernel_upscale_f32( const int64_t i2 = tgpig.y; const int64_t i1 = tgpig.x; - const int64_t i03 = i3; - const int64_t i02 = i2; - const int64_t i01 = i1/sf; - - device const float * src0_ptr = (device const float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01); - device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1); + const int64_t i03 = i3/sf3; + const int64_t i02 = i2/sf2; + const int64_t i01 = i1/sf1; for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) { - dst_ptr[i0] = src0_ptr[i0/sf]; + const int64_t i00 = i0/sf0; + + device const float * src0_ptr = (device const 
float *) (src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00); + device float * dst_ptr = (device float *) (dst + i3*nb3 + i2*nb2 + i1*nb1 + i0*nb0); + + dst_ptr[0] = src0_ptr[0]; } } diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp index 724070eb9..b15efb704 100644 --- a/ggml-sycl.cpp +++ b/ggml-sycl.cpp @@ -13987,6 +13987,10 @@ inline void ggml_sycl_op_upscale(const ggml_tensor *src0, GGML_ASSERT(dst->type == GGML_TYPE_F32); GGML_ASSERT(src0->ne[3] == 1 && dst->ne[3] == 1); // just 3D tensors +#pragma message("TODO: generalize upscale operator") +#pragma message(" https://github.com/ggerganov/ggml/pull/814") + GGML_ASSERT(false && "TODO: generalize upscale operator); + const int scale_factor = dst->op_params[0]; upscale_f32_sycl(src0_dd, dst_dd, src0->ne[0], src0->ne[1], src0->ne[2], scale_factor, main_stream); diff --git a/ggml.c b/ggml.c index 03b609ddd..f09cc3060 100644 --- a/ggml.c +++ b/ggml.c @@ -6293,7 +6293,10 @@ struct ggml_tensor * ggml_pool_2d( static struct ggml_tensor * ggml_upscale_impl( struct ggml_context * ctx, struct ggml_tensor * a, - int scale_factor) { + int ne0, + int ne1, + int ne2, + int ne3) { bool is_node = false; if (a->grad) { @@ -6301,19 +6304,45 @@ static struct ggml_tensor * ggml_upscale_impl( is_node = true; } + GGML_ASSERT(a->ne[0] <= ne0); + GGML_ASSERT(a->ne[1] <= ne1); + GGML_ASSERT(a->ne[2] <= ne2); + GGML_ASSERT(a->ne[3] <= ne3); + struct ggml_tensor * result = ggml_new_tensor_4d(ctx, a->type, - a->ne[0] * scale_factor, - a->ne[1] * scale_factor, - a->ne[2], a->ne[3]); + ne0, + ne1, + ne2, + ne3 + ); result->op = GGML_OP_UPSCALE; - result->op_params[0] = scale_factor; + result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL; result->src[0] = a; return result; } +struct ggml_tensor * ggml_upscale( + struct ggml_context * ctx, + struct ggml_tensor * a, + int scale_factor) { + return ggml_upscale_impl(ctx, a, a->ne[0] * scale_factor, a->ne[1] * scale_factor, a->ne[2], a->ne[3]); +} + +struct ggml_tensor * ggml_upscale_ext( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1, + int ne2, + int ne3) { + return ggml_upscale_impl(ctx, a, ne0, ne1, ne2, ne3); +} + +// ggml_pad + struct ggml_tensor * ggml_pad( struct ggml_context * ctx, struct ggml_tensor * a, @@ -6338,12 +6367,7 @@ struct ggml_tensor * ggml_pad( return result; } -struct ggml_tensor * ggml_upscale( - struct ggml_context * ctx, - struct ggml_tensor * a, - int scale_factor) { - return ggml_upscale_impl(ctx, a, scale_factor); -} +// ggml_arange struct ggml_tensor * ggml_arange( struct ggml_context * ctx, @@ -6365,6 +6389,8 @@ struct ggml_tensor * ggml_arange( return result; } +// ggml_timestep_embedding + struct ggml_tensor * ggml_timestep_embedding( struct ggml_context * ctx, struct ggml_tensor * timesteps, @@ -14820,25 +14846,28 @@ static void ggml_compute_forward_upscale_f32( return; } - GGML_ASSERT(src0->nb[0] == sizeof(float)); + GGML_ASSERT(src0->type == GGML_TYPE_F32); const int ith = params->ith; const int nth = params->nth; GGML_TENSOR_UNARY_OP_LOCALS - const int scale_factor = dst->op_params[0]; + const float sf0 = (float)ne0/src0->ne[0]; + const float sf1 = (float)ne1/src0->ne[1]; + const float sf2 = (float)ne2/src0->ne[2]; + const float sf3 = (float)ne3/src0->ne[3]; // TODO: optimize for (int64_t i3 = 0; i3 < ne3; i3++) { - const int64_t i03 = i3; + const int64_t i03 = i3 / sf3; for (int64_t i2 = ith; i2 < ne2; i2 += nth) { - const int64_t i02 = i2; + const int64_t i02 = i2 / sf2; for (int64_t i1 = 0; i1 < ne1; i1++) { - const int64_t i01 = i1 / scale_factor; + 
const int64_t i01 = i1 / sf1; for (int64_t i0 = 0; i0 < ne0; i0++) { - const int64_t i00 = i0 / scale_factor; + const int64_t i00 = i0 / sf0; const float * x = (float *)((char *) src0->data + i00*nb00 + i01*nb01 + i02*nb02 + i03*nb03); float * y = (float *)((char *) dst->data + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3); @@ -14868,6 +14897,7 @@ static void ggml_compute_forward_upscale( } } + // ggml_compute_forward_pad static void ggml_compute_forward_pad_f32( diff --git a/ggml.h b/ggml.h index 25f4f73a8..5e121604a 100644 --- a/ggml.h +++ b/ggml.h @@ -1674,12 +1674,24 @@ extern "C" { float p1); // nearest interpolate + // multiplies ne0 and ne1 by scale factor // used in stable-diffusion GGML_API struct ggml_tensor * ggml_upscale( struct ggml_context * ctx, struct ggml_tensor * a, int scale_factor); + // nearest interpolate + // nearest interpolate to specified dimensions + // used in tortoise.cpp + GGML_API struct ggml_tensor * ggml_upscale_ext( + struct ggml_context * ctx, + struct ggml_tensor * a, + int ne0, + int ne1, + int ne2, + int ne3); + // pad each dimension with zeros: [x, ..., x] -> [x, ..., x, 0, ..., 0] GGML_API struct ggml_tensor * ggml_pad( struct ggml_context * ctx, diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp index f080f7e22..85ef21c2a 100644 --- a/tests/test-backend-ops.cpp +++ b/tests/test-backend-ops.cpp @@ -1329,23 +1329,47 @@ struct test_upscale : public test_case { const ggml_type type; const std::array ne; const int32_t scale_factor; + const bool transpose; std::string vars() override { - return VARS_TO_STR3(type, ne, scale_factor); + return VARS_TO_STR4(type, ne, scale_factor, transpose); } test_upscale(ggml_type type = GGML_TYPE_F32, std::array ne = {512, 512, 3, 1}, - int32_t scale_factor = 2) - : type(type), ne(ne), scale_factor(scale_factor) {} + int32_t scale_factor = 2, bool transpose = false) + : type(type), ne(ne), scale_factor(scale_factor), transpose(transpose) {} ggml_tensor * build_graph(ggml_context * ctx) override { ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); + if (transpose) a = ggml_transpose(ctx, a); ggml_tensor * out = ggml_upscale(ctx, a, scale_factor); return out; } }; +// GGML_OP_UPSCALE (ext) +struct test_upscale_ext : public test_case { + const ggml_type type; + const std::array ne; + const std::array ne_tgt; + + std::string vars() override { + return VARS_TO_STR3(type, ne, ne_tgt); + } + + test_upscale_ext(ggml_type type = GGML_TYPE_F32, + std::array ne = {2, 5, 7, 11}, + std::array ne_tgt = {5, 7, 11, 13}) + : type(type), ne(ne), ne_tgt(ne_tgt) {} + + ggml_tensor * build_graph(ggml_context * ctx) override { + ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data()); + ggml_tensor * out = ggml_upscale_ext(ctx, a, ne_tgt[0], ne_tgt[1],ne_tgt[2], ne_tgt[3]); + return out; + } +}; + // GGML_OP_GROUP_NORM struct test_group_norm : public test_case { const ggml_type type; @@ -2169,6 +2193,8 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op test_cases.emplace_back(new test_sum_rows()); test_cases.emplace_back(new test_upscale()); + test_cases.emplace_back(new test_upscale(GGML_TYPE_F32, { 512, 512, 3, 1 }, 2, true)); + test_cases.emplace_back(new test_upscale_ext()); test_cases.emplace_back(new test_group_norm()); test_cases.emplace_back(new test_acc()); test_cases.emplace_back(new test_pad()); From 29499bb59383c2a8c5d557a90abb08b696cef7f6 Mon Sep 17 00:00:00 2001 From: Georgi Gerganov Date: Wed, 15 May 2024 13:23:41 +0300 Subject: [PATCH 25/51] sync : ggml --- 
scripts/sync-ggml.last | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/sync-ggml.last b/scripts/sync-ggml.last index b2b5f8fc1..57bede67b 100644 --- a/scripts/sync-ggml.last +++ b/scripts/sync-ggml.last @@ -1 +1 @@ -fafd5e7f89382b8cfb51e3dac8d4f1500ca44918 +126d34985705a5a2222723c145cb4e125ac689f3 From ea3b0590ee33d3573eb8ef76f88cc60f36d2a38d Mon Sep 17 00:00:00 2001 From: dm4 Date: Wed, 15 May 2024 20:01:12 +0800 Subject: [PATCH 26/51] embedding : free the batch after execution (#7297) --- examples/embedding/embedding.cpp | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp index c85a2da53..0c921ed69 100644 --- a/examples/embedding/embedding.cpp +++ b/examples/embedding/embedding.cpp @@ -211,6 +211,7 @@ int main(int argc, char ** argv) { // clean up llama_print_timings(ctx); + llama_batch_free(batch); llama_free(ctx); llama_free_model(model); llama_backend_free(); From 9a17ab914b0aa7353389c656a3f2a0f086726868 Mon Sep 17 00:00:00 2001 From: AidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com> Date: Wed, 15 May 2024 13:26:30 +0100 Subject: [PATCH 27/51] Add missing " (#7303) --- ggml-sycl.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-sycl.cpp b/ggml-sycl.cpp index b15efb704..19d22d637 100644 --- a/ggml-sycl.cpp +++ b/ggml-sycl.cpp @@ -13989,7 +13989,7 @@ inline void ggml_sycl_op_upscale(const ggml_tensor *src0, #pragma message("TODO: generalize upscale operator") #pragma message(" https://github.com/ggerganov/ggml/pull/814") - GGML_ASSERT(false && "TODO: generalize upscale operator); + GGML_ASSERT(false && "TODO: generalize upscale operator"); const int scale_factor = dst->op_params[0]; From 344f9126cc0d15891fde9472fe40b8572628ad7d Mon Sep 17 00:00:00 2001 From: slaren Date: Wed, 15 May 2024 15:08:48 +0200 Subject: [PATCH 28/51] ggml : tag ggml_tensor::backend as deprecated (#7290) --- examples/llava/llava.cpp | 15 --------------- ggml-backend.c | 1 - ggml.c | 10 ++++++++++ ggml.h | 3 ++- 4 files changed, 12 insertions(+), 17 deletions(-) diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp index 9a990bb18..63878d176 100644 --- a/examples/llava/llava.cpp +++ b/examples/llava/llava.cpp @@ -88,7 +88,6 @@ static struct clip_image_grid_shape get_anyres_image_grid_shape(const std::pair< // Take the image segments in a grid configuration and return the embeddings and the number of embeddings into preallocated memory (image_embd_out) static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector & image_embd_v, struct clip_image_grid_shape grid_shape, float * image_embd_out, int * n_img_pos_out) { struct { - struct ggml_tensor * newline; struct ggml_context * ctx; } model; @@ -150,20 +149,6 @@ static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector model.ctx = ggml_init(params); - ggml_tensor * newline_tmp = clip_get_newline_tensor(ctx_clip); - model.newline = ggml_new_tensor_1d(model.ctx, GGML_TYPE_F32, newline_tmp->ne[0]); - if (newline_tmp->backend != GGML_BACKEND_TYPE_CPU) { - if (newline_tmp->buffer == NULL) { - LOG_TEE("newline_tmp tensor buffer is NULL\n"); - } - ggml_backend_tensor_get(newline_tmp, model.newline->data, 0, ggml_nbytes(newline_tmp)); - } else { - model.newline->data = newline_tmp->data; - if (model.newline->data == NULL) { - LOG_TEE("newline_tmp tensor data is NULL\n"); - } - } - struct ggml_tensor * image_features = ggml_new_tensor_3d(model.ctx, GGML_TYPE_F32, clip_n_mmproj_embd(ctx_clip), 
clip_n_patches(ctx_clip), num_images - 1); // example: 4096 x 576 x 4 // ggml_tensor_printf(image_features,"image_features",__LINE__,false,false); // fill it with the image embeddings, ignoring the base diff --git a/ggml-backend.c b/ggml-backend.c index dd090a583..9e35ce98d 100644 --- a/ggml-backend.c +++ b/ggml-backend.c @@ -1895,7 +1895,6 @@ void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * t tensor->buffer = buffer; tensor->data = (char *)tensor->view_src->data + tensor->view_offs; - tensor->backend = tensor->view_src->backend; ggml_backend_buffer_init_tensor(buffer, tensor); } diff --git a/ggml.c b/ggml.c index f09cc3060..67e17a210 100644 --- a/ggml.c +++ b/ggml.c @@ -3178,6 +3178,12 @@ static struct ggml_tensor * ggml_new_tensor_impl( struct ggml_tensor * const result = (struct ggml_tensor *)((char *)ctx->mem_buffer + obj_new->offs); +#ifdef __clang__ + // temporary until ggml_tensor::backend is removed + #pragma clang diagnostic push + #pragma clang diagnostic ignored "-Wdeprecated-declarations" +#endif + *result = (struct ggml_tensor) { /*.type =*/ type, /*.backend =*/ GGML_BACKEND_TYPE_CPU, @@ -3200,6 +3206,10 @@ static struct ggml_tensor * ggml_new_tensor_impl( /*.padding =*/ { 0 }, }; +#ifdef __clang__ + #pragma clang diagnostic pop +#endif + // TODO: this should not be needed as long as we don't rely on aligned SIMD loads //ggml_assert_aligned(result->data); diff --git a/ggml.h b/ggml.h index 5e121604a..8c13f4ba8 100644 --- a/ggml.h +++ b/ggml.h @@ -565,7 +565,8 @@ extern "C" { // n-dimensional tensor struct ggml_tensor { enum ggml_type type; - enum ggml_backend_type backend; + + GGML_DEPRECATED(enum ggml_backend_type backend, "use the buffer type to find the storage location of the tensor"); struct ggml_backend_buffer * buffer; From dc020985b8755dd6aa93a2f002f43c3ede808cce Mon Sep 17 00:00:00 2001 From: agray3 Date: Wed, 15 May 2024 14:44:49 +0100 Subject: [PATCH 29/51] Avoid unnecessarily disabling CUDA graphs (#7302) As discussed in PR #6766, CUDA graphs were being disabled in the presence of long prompts. This fixes the issue by avoiding the consective update counter from incrementing unnecessarily for tokens in which cuda graphs are disabled due to batch size > 1. --- ggml-cuda.cu | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ggml-cuda.cu b/ggml-cuda.cu index 75a2ad480..04b6e5285 100644 --- a/ggml-cuda.cu +++ b/ggml-cuda.cu @@ -2558,7 +2558,7 @@ GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t } // Disable CUDA graphs (from the next token) if the use-case is demanding too many consecutive graph updates. - if (cuda_graph_update_required) { + if (use_cuda_graph && cuda_graph_update_required) { cuda_ctx->cuda_graph->number_consecutive_updates++; } else { cuda_ctx->cuda_graph->number_consecutive_updates = 0; From e1b40ac3b94824d761b5e26ea1bc5692706029d9 Mon Sep 17 00:00:00 2001 From: kunnis Date: Wed, 15 May 2024 12:59:12 -0500 Subject: [PATCH 30/51] ggml : use dynamic thread scheduling for matrix multiplication (#6915) * Just reordering some structs. * Adding in the calls to mm_pause * Passing around the state * Renaming and moving a bunch of variables around. * Extracting the logic to it's own function. * Moving some variable definitions into the chunk function. * Moving some variables around * moving src1_cont inside * Moving row_size * adding the current_chunk * Reorg the code. 
* Formatting to match the orig patch * starting to setup the chunking variables * Starting the buildup of the loop * The yield shouldn't be necessary. * adding the looping structure based on the chunk configuration. * Add in the re-chunking code. * Making it much more likely to rechunk. * disable resizing if numa is enabled. * Updating comments with what we've learned. * Fix formatting * Couple more formatting fixes. * More style fixes. * Fix Warnings * Going with unused because there's conditional logic that needs it. * Update ggml.c * Update ggml.c --------- --- ggml.c | 381 +++++++++++++++++++++++++++++++++++---------------------- 1 file changed, 237 insertions(+), 144 deletions(-) diff --git a/ggml.c b/ggml.c index 67e17a210..684d84235 100644 --- a/ggml.c +++ b/ggml.c @@ -112,6 +112,8 @@ typedef void * thread_ret_t; #endif +typedef pthread_t ggml_thread_t; + #ifdef GGML_USE_CPU_HBM #include #endif @@ -1539,6 +1541,59 @@ static inline void __sse_f16x4_store(ggml_fp16_t *x, __m128 y) { #define GGML_F16_ARR (GGML_F16_STEP/GGML_F16_EPR) #endif +// +// ggml context +// + +struct ggml_context { + size_t mem_size; + void* mem_buffer; + bool mem_buffer_owned; + bool no_alloc; + bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers + + int n_objects; + + struct ggml_object* objects_begin; + struct ggml_object* objects_end; + + struct ggml_scratch scratch; + struct ggml_scratch scratch_save; +}; + +struct ggml_context_container { + bool used; + + struct ggml_context context; +}; + +struct ggml_compute_state_shared { + const struct ggml_cgraph* cgraph; + const struct ggml_cplan* cplan; + + int64_t perf_node_start_cycles; + int64_t perf_node_start_time_us; + + const int n_threads; + + // synchronization primitives + atomic_int n_active; // num active threads + atomic_int node_n; // active graph node + atomic_int node_task; // active graph node task phase + + ggml_abort_callback abort_callback; // abort ggml_graph_compute when true + void* abort_callback_data; + + atomic_int current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads. 
+}; + +struct ggml_compute_state { + ggml_thread_t thrd; + int ith; + struct ggml_compute_state_shared* shared; + enum ggml_status ec; +}; + // // fundamental operations // @@ -2385,32 +2440,6 @@ static void ggml_setup_op_has_task_pass(void) { } } -// -// ggml context -// - -struct ggml_context { - size_t mem_size; - void * mem_buffer; - bool mem_buffer_owned; - bool no_alloc; - bool no_alloc_save; // this is used to save the no_alloc state when using scratch buffers - - int n_objects; - - struct ggml_object * objects_begin; - struct ggml_object * objects_end; - - struct ggml_scratch scratch; - struct ggml_scratch scratch_save; -}; - -struct ggml_context_container { - bool used; - - struct ggml_context context; -}; - // // NUMA support // @@ -11815,9 +11844,101 @@ static bool ggml_compute_forward_mul_mat_use_blas(struct ggml_tensor * dst) { } #endif +static void ggml_compute_forward_mul_mat_one_chunk( + const struct ggml_compute_params * params, + struct ggml_tensor * dst, + const int64_t num_rows_per_vec_dot, + const int64_t ir0_start, + const int64_t ir0_end, + const int64_t ir1_start, + const int64_t ir1_end) { + + const struct ggml_tensor * src0 = dst->src[0]; + const struct ggml_tensor * src1 = dst->src[1]; + + GGML_TENSOR_BINARY_OP_LOCALS + + const enum ggml_type type = src0->type; + + const bool src1_cont = ggml_is_contiguous(src1); + + ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot; + enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type; + + // broadcast factors + const int64_t r2 = ne12 / ne02; + const int64_t r3 = ne13 / ne03; + + //printf("ir0_start = %6lld, ir0_end = %6lld, ir1_start = %6lld, ir1_end = %6lld\n", ir0_start, ir0_end, ir1_start, ir1_end); + + // threads with no work simply yield (not sure if it helps) + if (ir0_start >= ir0_end || ir1_start >= ir1_end) { + return; + } + + const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; + const size_t row_size = ggml_row_size(vec_dot_type, ne10); + + assert(ne12 % ne02 == 0); + assert(ne13 % ne03 == 0); + + // block-tiling attempt + const int64_t blck_0 = 16; + const int64_t blck_1 = 16; + + const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11; + + // attempt to reduce false-sharing (does not seem to make a difference) + // 16 * 2, accounting for mmla kernels + float tmp[32]; + + for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) { + for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) { + for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1 += num_rows_per_vec_dot) { + const int64_t i13 = (ir1 / (ne12 * ne1)); + const int64_t i12 = (ir1 - i13 * ne12 * ne1) / ne1; + const int64_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1); + + // broadcast src0 into src1 + const int64_t i03 = i13 / r3; + const int64_t i02 = i12 / r2; + + const int64_t i1 = i11; + const int64_t i2 = i12; + const int64_t i3 = i13; + + const char * src0_row = (const char*)src0->data + (0 + i02 * nb02 + i03 * nb03); + + // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides + // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using + // the original src1 data pointer, so we should index using the indices directly + // TODO: this is a bit of a hack, we should probably have a better way to handle this + const char * src1_col = (const char*)wdata + + (src1_cont || src1->type != vec_dot_type + ? 
(i11 + i12 * ne11 + i13 * ne12 * ne11) * row_size + : (i11 * nb11 + i12 * nb12 + i13 * nb13)); + float * dst_col = (float*)((char*)dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3)); + + //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) { + // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col); + //} + + for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0 += num_rows_per_vec_dot) { + vec_dot(ne00, &tmp[ir0 - iir0], (num_rows_per_vec_dot > 1 ? 16 : 0), src0_row + ir0 * nb01, (num_rows_per_vec_dot > 1 ? nb01 : 0), src1_col, (num_rows_per_vec_dot > 1 ? src1_col_stride : 0), num_rows_per_vec_dot); + } + + for (int cn = 0; cn < num_rows_per_vec_dot; ++cn) { + memcpy(&dst_col[iir0 + cn * nb1 / nb0], tmp + (cn * 16), (MIN(iir0 + blck_0, ir0_end) - iir0) * sizeof(float)); + } + } + } + } +} + static void ggml_compute_forward_mul_mat( const struct ggml_compute_params * params, - struct ggml_tensor * dst) { + struct ggml_tensor * dst, + struct ggml_compute_state * state) { const struct ggml_tensor * src0 = dst->src[0]; const struct ggml_tensor * src1 = dst->src[1]; @@ -11832,9 +11953,6 @@ static void ggml_compute_forward_mul_mat( const enum ggml_type type = src0->type; - const bool src1_cont = ggml_is_contiguous(src1); - - ggml_vec_dot_t const vec_dot = type_traits[type].vec_dot; enum ggml_type const vec_dot_type = type_traits[type].vec_dot_type; ggml_from_float_t const from_float_to_vec_dot = type_traits[vec_dot_type].from_float; int64_t const vec_dot_num_rows = type_traits[type].nrows; @@ -11855,8 +11973,10 @@ static void ggml_compute_forward_mul_mat( GGML_ASSERT(nb2 <= nb3); // broadcast factors - const int64_t r2 = ne12/ne02; - const int64_t r3 = ne13/ne03; + const int64_t r2 = ne12 / ne02; + const int64_t r3 = ne13 / ne03; + UNUSED(r2); + UNUSED(r3); // nb01 >= nb00 - src0 is not transposed // compute by src0 rows @@ -11938,6 +12058,8 @@ static void ggml_compute_forward_mul_mat( #endif #if GGML_USE_LLAMAFILE + const bool src1_cont = ggml_is_contiguous(src1); + if (src1_cont) { for (int64_t i13 = 0; i13 < ne13; i13++) for (int64_t i12 = 0; i12 < ne12; i12++) @@ -11963,6 +12085,8 @@ UseGgmlGemm1:; if (ith != 0) { return; } + // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start. + atomic_store(&state->shared->current_chunk, nth); if (src1->type != vec_dot_type) { char * wdata = params->wdata; const size_t row_size = ggml_row_size(vec_dot_type, ne10); @@ -11987,11 +12111,11 @@ UseGgmlGemm1:; return; } - const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; - const size_t row_size = ggml_row_size(vec_dot_type, ne10); - #if GGML_USE_LLAMAFILE if (src1->type != vec_dot_type) { + const void* wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata; + const size_t row_size = ggml_row_size(vec_dot_type, ne10); + for (int64_t i13 = 0; i13 < ne13; i13++) for (int64_t i12 = 0; i12 < ne12; i12++) if (!llamafile_sgemm(ne01, ne11, ne00/ggml_blck_size(src0->type), @@ -12012,98 +12136,87 @@ UseGgmlGemm1:; UseGgmlGemm2:; #endif - const int64_t nr0 = ne01; // src0 rows - const int64_t nr1 = ne1*ne12*ne13; // src1 rows +#ifdef GGML_PERF + int chunks_executed = 0; + UNUSED(chunks_executed); +#endif - //printf("nr0 = %lld, nr1 = %lld\n", nr0, nr1); + // This is the size of the first dimension of the result, so we can iterate that way. 
(see the ASSERT above, these are the same numbers) + const int64_t nr0 = ne0; - // distribute the thread work across the inner or outer loop based on which one is larger - - const int64_t nth0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows - const int64_t nth1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows - - const int64_t ith0 = ith % nth0; - const int64_t ith1 = ith / nth0; - - const int64_t dr0 = (nr0 + nth0 - 1)/nth0; - const int64_t dr1 = (nr1 + nth1 - 1)/nth1; - - const int64_t ir010 = dr0*ith0; - const int64_t ir011 = MIN(ir010 + dr0, nr0); - - const int64_t ir110 = dr1*ith1; - const int64_t ir111 = MIN(ir110 + dr1, nr1); - - //printf("ir010 = %6lld, ir011 = %6lld, ir110 = %6lld, ir111 = %6lld\n", ir010, ir011, ir110, ir111); - - // threads with no work simply yield (not sure if it helps) - if (ir010 >= ir011 || ir110 >= ir111) { - sched_yield(); - return; - } - - assert(ne12 % ne02 == 0); - assert(ne13 % ne03 == 0); - - // block-tiling attempt - const int64_t blck_0 = 16; - const int64_t blck_1 = 16; + // This is the size of the rest of the dimensions of the result + const int64_t nr1 = ne1 * ne2 * ne3; // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols - int64_t nrc = vec_dot_num_rows; + int64_t num_rows_per_vec_dot = vec_dot_num_rows; // TODO: currently the mmla kernels support only even numbered rows/cols. // this check can be removed once they are extended to support odd numbered rows/cols too if ((nr0 % 2 != 0) || (ne11 % 2 != 0)) { - nrc = 1; + num_rows_per_vec_dot = 1; } - const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11; + // Now select a reasonable chunk size. + int chunk_size = 16; - // attempt to reduce false-sharing (does not seem to make a difference) - // 16 * 2, accounting for mmla kernels - float tmp[32]; + // We need to step up the size if it's small + if (nr0 == 1 || nr1 == 1) { + chunk_size = 64; + } - for (int64_t iir1 = ir110; iir1 < ir111; iir1 += blck_1) { - for (int64_t iir0 = ir010; iir0 < ir011; iir0 += blck_0) { - for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir111; ir1 += nrc) { - const int64_t i13 = (ir1/(ne12*ne1)); - const int64_t i12 = (ir1 - i13*ne12*ne1)/ne1; - const int64_t i11 = (ir1 - i13*ne12*ne1 - i12*ne1); + // distribute the work across the inner or outer loop based on which one is larger + // The number of chunks in the 0/1 dim. + // CEIL(nr0/chunk_size) + int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size; + int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size; - // broadcast src0 into src1 - const int64_t i03 = i13/r3; - const int64_t i02 = i12/r2; + // If the chunking is poor for the number of threads on this setup, scrap the whole plan. Re-chunk it by thread. + // Also, chunking by thread was measured to have perform better on NUMA systems. See https://github.com/ggerganov/llama.cpp/pull/6915 + // In theory, chunking should be just as useful on NUMA and non NUMA systems, but testing disagreed with that. + if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) { + // distribute the thread work across the inner or outer loop based on which one is larger + nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows + nchunk1 = nr0 > nr1 ? 
1 : nth; // parallelize by src1 rows + } - const int64_t i1 = i11; - const int64_t i2 = i12; - const int64_t i3 = i13; + // The number of elements in each chunk + const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0; + const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1; - const char * src0_row = (const char *) src0->data + (0 + i02*nb02 + i03*nb03); + //if (ith == 0) + // printf("MUL_MAT = [%d, %d, %d, %d] x [%d, %d, %d, %d] = %d x %d = %d. Fp Ops/Ch %d\n", ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nchunk0, nchunk1, nchunk0 * nchunk1, ne00 * nr0 * nr1 / nchunk0 / nchunk1); - // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides - // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using - // the original src1 data pointer, so we should index using the indices directly - // TODO: this is a bit of a hack, we should probably have a better way to handle this - const char * src1_col = (const char *) wdata + - (src1_cont || src1->type != vec_dot_type - ? (i11 + i12*ne11 + i13*ne12*ne11)*row_size - : (i11*nb11 + i12*nb12 + i13*nb13)); - float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2 + i3*nb3)); + // The first chunk comes from our thread_id, the rest will get auto-assigned. + int current_chunk = ith; - //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ++ir0) { - // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col); - //} + while (current_chunk < nchunk0 * nchunk1) { + const int64_t ith0 = current_chunk % nchunk0; + const int64_t ith1 = current_chunk / nchunk0; - for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir011; ir0 += nrc) { - vec_dot(ne00, &tmp[ir0 - iir0], (nrc>1 ? 16 : 0), src0_row + ir0*nb01, (nrc>1 ? nb01 : 0), src1_col, (nrc>1 ? src1_col_stride : 0), nrc); - } + const int64_t ir0_start = dr0 * ith0; + const int64_t ir0_end = MIN(ir0_start + dr0, nr0); - for (int cn = 0; cn < nrc; ++cn) { - memcpy(&dst_col[iir0 + cn*nb1/nb0], tmp + (cn*16), (MIN(iir0 + blck_0, ir011) - iir0)*sizeof(float)); - } - } + const int64_t ir1_start = dr1 * ith1; + const int64_t ir1_end = MIN(ir1_start + dr1, nr1); + + ggml_compute_forward_mul_mat_one_chunk(params, dst, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end); + +#ifdef GGML_PERF + chunks_executed++; +#endif + + if (nth >= nchunk0 * nchunk1) { + break; } + + current_chunk = atomic_fetch_add(&state->shared->current_chunk, 1); } + +#ifdef GGML_PERF + // These numbers are useful when trying to measure how well the threading scheduling works. 
+ //int64_t workSize = (ne01 * ne11 * ne12 * ne13 * ne00) / nchunk0 / nchunk1; + //float time = (ggml_perf_time_us() - t0); + //printf("MUL_MAT = %f ms, [%d, %d, %d, %d] x [%d, %d, %d, %d] = %I64u, %f ops/usec in %d chunks.\n", time / 1000.0, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, workSize, (float)workSize/time, chunks_executed); +#endif } // ggml_compute_forward_mul_mat_id @@ -17358,7 +17471,7 @@ static void ggml_compute_forward_cross_entropy_loss_back( ///////////////////////////////// -static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) { +static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor, struct ggml_compute_state * state) { GGML_ASSERT(params); if (tensor->op == GGML_OP_NONE || ggml_is_empty(tensor)) { @@ -17456,7 +17569,7 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm } break; case GGML_OP_MUL_MAT: { - ggml_compute_forward_mul_mat(params, tensor); + ggml_compute_forward_mul_mat(params, tensor, state); } break; case GGML_OP_MUL_MAT_ID: { @@ -19072,8 +19185,6 @@ typedef int ggml_lock_t; #define GGML_LOCK_INITIALIZER 0 -typedef pthread_t ggml_thread_t; - #define ggml_thread_create pthread_create #define ggml_thread_join pthread_join @@ -19099,8 +19210,6 @@ typedef int ggml_lock_t; #define GGML_LOCK_INITIALIZER 0 -typedef pthread_t ggml_thread_t; - #define ggml_thread_create pthread_create #define ggml_thread_join pthread_join @@ -19180,31 +19289,6 @@ static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); } static void clear_numa_thread_affinity(void) {} #endif -struct ggml_compute_state_shared { - const struct ggml_cgraph * cgraph; - const struct ggml_cplan * cplan; - - int64_t perf_node_start_cycles; - int64_t perf_node_start_time_us; - - const int n_threads; - - // synchronization primitives - atomic_int n_active; // num active threads - atomic_int node_n; // active graph node - atomic_int node_task; // active graph node task phase - - ggml_abort_callback abort_callback; // abort ggml_graph_compute when true - void * abort_callback_data; -}; - -struct ggml_compute_state { - ggml_thread_t thrd; - int ith; - struct ggml_compute_state_shared * shared; - enum ggml_status ec; -}; - static void ggml_graph_compute_perf_stats_node(struct ggml_tensor * node, const struct ggml_compute_state_shared * st) { int64_t cycles_cur = ggml_perf_cycles() - st->perf_node_start_cycles; int64_t time_us_cur = ggml_perf_time_us() - st->perf_node_start_time_us; @@ -19477,6 +19561,10 @@ static void ggml_graph_compute_thread_sync_node(int * node_n, struct ggml_comput * node_n = atomic_load(&state->shared->node_n); if (* node_n != last_node_n) break; +#if defined(__SSE3__) + // Tell the processor we're spinning. It's a processor hint for spinlocks. + _mm_pause(); +#endif } } @@ -19491,6 +19579,10 @@ static void ggml_graph_compute_thread_sync_task(int * task_phase, struct ggml_co * task_phase = atomic_load(&state->shared->node_task); if (* task_phase != last_task_phase) break; +#if defined(__SSE3__) + // Tell the processor we're spinning. It's a processor hint for spinlocks. 
+ _mm_pause(); +#endif } } @@ -19530,7 +19622,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { struct ggml_tensor * node = cgraph->nodes[node_n]; if (GGML_OP_HAS_FINALIZE[node->op]) { params.nth = ggml_get_n_tasks(node, n_threads, state->shared->n_threads); - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } ggml_graph_compute_perf_stats_node(node, state->shared); } @@ -19550,17 +19642,17 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { /* INIT */ if (GGML_OP_HAS_INIT[node->op]) { params.type = GGML_TASK_TYPE_INIT; - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } // TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1, // they do something more efficient than spinning (?) params.type = GGML_TASK_TYPE_COMPUTE; - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); if (GGML_OP_HAS_FINALIZE[node->op]) { params.type = GGML_TASK_TYPE_FINALIZE; - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } ggml_graph_compute_perf_stats_node(node, state->shared); @@ -19599,7 +19691,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (state->ith < n_tasks) { if (GGML_OP_HAS_INIT[node->op]) { - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } } @@ -19620,7 +19712,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) { if (state->ith < n_tasks) { params.type = GGML_TASK_TYPE_COMPUTE; - ggml_compute_forward(¶ms, node); + ggml_compute_forward(¶ms, node, state); } if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) { @@ -19871,6 +19963,7 @@ enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cpl /*.node_task =*/ GGML_TASK_TYPE_FINALIZE, /*.abort_callback =*/ NULL, /*.abort_callback_data =*/ NULL, + /*.current_chunk; =*/ 0, }; struct ggml_compute_state * workers = alloca(sizeof(struct ggml_compute_state)*n_threads); From 8f7080bf48828b538bc9387c3d150bbd4fb4cf2d Mon Sep 17 00:00:00 2001 From: Daniel Bevenius Date: Wed, 15 May 2024 23:41:03 +0200 Subject: [PATCH 31/51] readme : remove stray double quote (#7310) Signed-off-by: Daniel Bevenius --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9f2f8df64..ecbe802df 100644 --- a/README.md +++ b/README.md @@ -532,7 +532,7 @@ Building the program with BLAS support may lead to some performance improvements cmake -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \ && cmake --build build --config Release -- -j 16 ``` - On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DLLAMA_HIP_UMA=ON"`. + On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DLLAMA_HIP_UMA=ON`. However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs). 
- Using `make` (example for target gfx1030, build with 16 CPU threads): From 13ad16af1231ab2d245d35df3295bcfa23de1305 Mon Sep 17 00:00:00 2001 From: Max Krasnyansky Date: Wed, 15 May 2024 19:47:36 -0700 Subject: [PATCH 32/51] Add support for properly optimized Windows ARM64 builds with LLVM and MSVC (#7191) * logging: add proper checks for clang to avoid errors and warnings with VA_ARGS * build: add CMake Presets and toolchian files for Windows ARM64 * matmul-int8: enable matmul-int8 with MSVC and fix Clang warnings * ci: add support for optimized Windows ARM64 builds with MSVC and LLVM * matmul-int8: fixed typos in q8_0_q8_0 matmuls Co-authored-by: Georgi Gerganov * matmul-int8: remove unnecessary casts in q8_0_q8_0 --------- Co-authored-by: Georgi Gerganov --- .github/workflows/build.yml | 61 ++++++++++++++++++---------------- CMakeLists.txt | 5 +++ CMakePresets.json | 45 +++++++++++++++++++++++++ cmake/arm64-windows-llvm.cmake | 16 +++++++++ cmake/arm64-windows-msvc.cmake | 6 ++++ common/log.h | 10 +++--- ggml-quants.c | 53 +++++++++++++++-------------- 7 files changed, 138 insertions(+), 58 deletions(-) create mode 100644 CMakePresets.json create mode 100644 cmake/arm64-windows-llvm.cmake create mode 100644 cmake/arm64-windows-msvc.cmake diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7ac0e5f6e..2d2fea4a2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -693,26 +693,28 @@ jobs: strategy: matrix: include: - - build: 'rpc' + - build: 'rpc-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_RPC=ON -DBUILD_SHARED_LIBS=ON' - - build: 'noavx' + - build: 'noavx-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX=OFF -DLLAMA_AVX2=OFF -DLLAMA_FMA=OFF -DBUILD_SHARED_LIBS=ON' - - build: 'avx2' + - build: 'avx2-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' - - build: 'avx' + - build: 'avx-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX2=OFF -DBUILD_SHARED_LIBS=ON' - - build: 'avx512' + - build: 'avx512-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_AVX512=ON -DBUILD_SHARED_LIBS=ON' - - build: 'clblast' + - build: 'clblast-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_CLBLAST=ON -DBUILD_SHARED_LIBS=ON -DCMAKE_PREFIX_PATH="$env:RUNNER_TEMP/clblast"' - - build: 'openblas' + - build: 'openblas-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_BLAS=ON -DBUILD_SHARED_LIBS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS -DBLAS_INCLUDE_DIRS="$env:RUNNER_TEMP/openblas/include" -DBLAS_LIBRARIES="$env:RUNNER_TEMP/openblas/lib/openblas.lib"' - - build: 'kompute' + - build: 'kompute-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_KOMPUTE=ON -DKOMPUTE_OPT_DISABLE_VULKAN_VERSION_CHECK=ON -DBUILD_SHARED_LIBS=ON' - - build: 'vulkan' + - build: 'vulkan-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_VULKAN=ON -DBUILD_SHARED_LIBS=ON' - - build: 'arm64' - defines: '-A ARM64 -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + - build: 'llvm-arm64' + defines: '-G Ninja -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + - build: 'msvc-arm64' + defines: '-G Ninja -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' steps: - name: Clone @@ -723,13 +725,13 @@ jobs: - name: Clone Kompute submodule id: clone_kompute - if: ${{ matrix.build == 
'kompute' }} + if: ${{ matrix.build == 'kompute-x64' }} run: | git submodule update --init kompute - name: Download OpenCL SDK id: get_opencl - if: ${{ matrix.build == 'clblast' }} + if: ${{ matrix.build == 'clblast-x64' }} run: | curl.exe -o $env:RUNNER_TEMP/opencl.zip -L "https://github.com/KhronosGroup/OpenCL-SDK/releases/download/v${env:OPENCL_VERSION}/OpenCL-SDK-v${env:OPENCL_VERSION}-Win-x64.zip" mkdir $env:RUNNER_TEMP/opencl @@ -737,7 +739,7 @@ jobs: - name: Download CLBlast id: get_clblast - if: ${{ matrix.build == 'clblast' }} + if: ${{ matrix.build == 'clblast-x64' }} run: | curl.exe -o $env:RUNNER_TEMP/clblast.7z -L "https://github.com/CNugteren/CLBlast/releases/download/${env:CLBLAST_VERSION}/CLBlast-${env:CLBLAST_VERSION}-windows-x64.7z" curl.exe -o $env:RUNNER_TEMP/CLBlast.LICENSE.txt -L "https://github.com/CNugteren/CLBlast/raw/${env:CLBLAST_VERSION}/LICENSE" @@ -750,7 +752,7 @@ jobs: - name: Download OpenBLAS id: get_openblas - if: ${{ matrix.build == 'openblas' }} + if: ${{ matrix.build == 'openblas-x64' }} run: | curl.exe -o $env:RUNNER_TEMP/openblas.zip -L "https://github.com/xianyi/OpenBLAS/releases/download/v${env:OPENBLAS_VERSION}/OpenBLAS-${env:OPENBLAS_VERSION}-x64.zip" curl.exe -o $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt -L "https://github.com/xianyi/OpenBLAS/raw/v${env:OPENBLAS_VERSION}/LICENSE" @@ -763,38 +765,41 @@ jobs: - name: Install Vulkan SDK id: get_vulkan - if: ${{ matrix.build == 'kompute' || matrix.build == 'vulkan' }} + if: ${{ matrix.build == 'kompute-x64' || matrix.build == 'vulkan-x64' }} run: | curl.exe -o $env:RUNNER_TEMP/VulkanSDK-Installer.exe -L "https://sdk.lunarg.com/sdk/download/${env:VULKAN_VERSION}/windows/VulkanSDK-${env:VULKAN_VERSION}-Installer.exe" & "$env:RUNNER_TEMP\VulkanSDK-Installer.exe" --accept-licenses --default-answer --confirm-command install Add-Content $env:GITHUB_ENV "VULKAN_SDK=C:\VulkanSDK\${env:VULKAN_VERSION}" Add-Content $env:GITHUB_PATH "C:\VulkanSDK\${env:VULKAN_VERSION}\bin" + - name: Install Ninja + id: install_ninja + run: | + choco install ninja + - name: Build id: cmake_build run: | - mkdir build - cd build - cmake .. ${{ matrix.defines }} - cmake --build . --config Release -j ${env:NUMBER_OF_PROCESSORS} + cmake -S . 
-B build ${{ matrix.defines }} + cmake --build build --config Release -j ${env:NUMBER_OF_PROCESSORS} - name: Add clblast.dll id: add_clblast_dll - if: ${{ matrix.build == 'clblast' }} + if: ${{ matrix.build == 'clblast-x64' }} run: | cp $env:RUNNER_TEMP/clblast/lib/clblast.dll ./build/bin/Release cp $env:RUNNER_TEMP/CLBlast.LICENSE.txt ./build/bin/Release/CLBlast-${env:CLBLAST_VERSION}.txt - name: Add libopenblas.dll id: add_libopenblas_dll - if: ${{ matrix.build == 'openblas' }} + if: ${{ matrix.build == 'openblas-x64' }} run: | cp $env:RUNNER_TEMP/openblas/bin/libopenblas.dll ./build/bin/Release/openblas.dll cp $env:RUNNER_TEMP/OpenBLAS.LICENSE.txt ./build/bin/Release/OpenBLAS-${env:OPENBLAS_VERSION}.txt - name: Check AVX512F support id: check_avx512f - if: ${{ matrix.build == 'avx512' }} + if: ${{ matrix.build == 'avx512-x64' }} continue-on-error: true run: | cd build @@ -808,14 +813,14 @@ jobs: - name: Test id: cmake_test # not all machines have native AVX-512 - if: ${{ matrix.build != 'arm64' && matrix.build != 'clblast' && matrix.build != 'kompute' && matrix.build != 'vulkan' && (matrix.build != 'avx512' || env.HAS_AVX512F == '1') }} + if: ${{ matrix.build != 'msvc-arm64' && matrix.build != 'llvm-arm64' && matrix.build != 'clblast-x64' && matrix.build != 'kompute-x64' && matrix.build != 'vulkan-x64' && (matrix.build != 'avx512-x64' || env.HAS_AVX512F == '1') }} run: | cd build ctest -L main -C Release --verbose --timeout 900 - name: Test (Intel SDE) id: cmake_test_sde - if: ${{ matrix.build == 'avx512' && env.HAS_AVX512F == '0' }} # use Intel SDE for AVX-512 emulation + if: ${{ matrix.build == 'avx512-x64' && env.HAS_AVX512F == '0' }} # use Intel SDE for AVX-512 emulation run: | curl.exe -o $env:RUNNER_TEMP/sde.tar.xz -L "https://downloadmirror.intel.com/813591/sde-external-${env:SDE_VERSION}-win.tar.xz" # for some weird reason windows tar doesn't like sde tar.xz @@ -843,14 +848,14 @@ jobs: if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} run: | Copy-Item LICENSE .\build\bin\Release\llama.cpp.txt - 7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-x64.zip .\build\bin\Release\* + 7z a llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip .\build\bin\Release\* - name: Upload artifacts if: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/master' ) || github.event.inputs.create_release == 'true' }} uses: actions/upload-artifact@v4 with: - path: llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}-x64.zip - name: llama-bin-win-${{ matrix.build }}-x64.zip + path: llama-${{ steps.tag.outputs.name }}-bin-win-${{ matrix.build }}.zip + name: llama-bin-win-${{ matrix.build }}.zip windows-latest-cmake-cuda: runs-on: windows-latest diff --git a/CMakeLists.txt b/CMakeLists.txt index feb6f39d0..8ab6a45a6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1007,6 +1007,11 @@ if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64" OR CMAKE_GENERATOR_PLATFORM_LWR STR if (GGML_COMPILER_SUPPORT_DOTPROD) add_compile_definitions(__ARM_FEATURE_DOTPROD) endif () + check_cxx_source_compiles("#include \nint main() { int8x16_t _a, _b; int32x4_t _s = vmlaq_f32(_s, _a, _b); return 0; }" GGML_COMPILER_SUPPORT_MATMUL_INT8) + if (GGML_COMPILER_SUPPORT_MATMUL_INT8) + add_compile_definitions(__ARM_FEATURE_MATMUL_INT8) + endif () + check_cxx_source_compiles("#include \nint main() { float16_t _a; float16x8_t _s = vdupq_n_f16(_a); return 0; }" GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC) if 
(GGML_COMPILER_SUPPORT_FP16_VECTOR_ARITHMETIC) add_compile_definitions(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) diff --git a/CMakePresets.json b/CMakePresets.json new file mode 100644 index 000000000..ad1af7ecc --- /dev/null +++ b/CMakePresets.json @@ -0,0 +1,45 @@ +{ + "version": 4, + "configurePresets": [ + { + "name": "base", + "hidden": true, + "generator": "Ninja", + "binaryDir": "${sourceDir}/build-${presetName}", + "cacheVariables": { + "CMAKE_EXPORT_COMPILE_COMMANDS": "ON", + "CMAKE_INSTALL_RPATH": "$ORIGIN;$ORIGIN/.." + } + }, + + { "name": "debug", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "Debug" } }, + { "name": "release", "hidden": true, "cacheVariables": { "CMAKE_BUILD_TYPE": "RelWithDebInfo" } }, + { "name": "static", "hidden": true, "cacheVariables": { "LLAMA_STATIC": "ON" } }, + + { + "name": "arm64-windows-msvc", "hidden": true, + "architecture": { "value": "arm64", "strategy": "external" }, + "toolset": { "value": "host=x86_64", "strategy": "external" }, + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/arm64-windows-msvc.cmake" + } + }, + + { + "name": "arm64-windows-llvm", "hidden": true, + "architecture": { "value": "arm64", "strategy": "external" }, + "toolset": { "value": "host=x86_64", "strategy": "external" }, + "cacheVariables": { + "CMAKE_TOOLCHAIN_FILE": "${sourceDir}/cmake/arm64-windows-llvm.cmake" + } + }, + + { "name": "arm64-windows-llvm-debug" , "inherits": [ "base", "arm64-windows-llvm", "debug" ] }, + { "name": "arm64-windows-llvm-release", "inherits": [ "base", "arm64-windows-llvm", "release" ] }, + { "name": "arm64-windows-llvm+static-release", "inherits": [ "base", "arm64-windows-llvm", "release", "static" ] }, + + { "name": "arm64-windows-msvc-debug" , "inherits": [ "base", "arm64-windows-msvc", "debug" ] }, + { "name": "arm64-windows-msvc-release", "inherits": [ "base", "arm64-windows-msvc", "release" ] }, + { "name": "arm64-windows-msvc+static-release", "inherits": [ "base", "arm64-windows-msvc", "release", "static" ] } + ] +} diff --git a/cmake/arm64-windows-llvm.cmake b/cmake/arm64-windows-llvm.cmake new file mode 100644 index 000000000..46fba6514 --- /dev/null +++ b/cmake/arm64-windows-llvm.cmake @@ -0,0 +1,16 @@ +set( CMAKE_SYSTEM_NAME Windows ) +set( CMAKE_SYSTEM_PROCESSOR arm64 ) + +set( target arm64-pc-windows-msvc ) + +set( CMAKE_C_COMPILER clang ) +set( CMAKE_CXX_COMPILER clang++ ) + +set( CMAKE_C_COMPILER_TARGET ${target} ) +set( CMAKE_CXX_COMPILER_TARGET ${target} ) + +set( arch_c_flags "-march=armv8.7-a -fvectorize -ffp-model=fast" ) +set( warn_c_flags "-Wno-format -Wno-unused-variable -Wno-unused-function -Wno-gnu-zero-variadic-macro-arguments" ) + +set( CMAKE_C_FLAGS_INIT "${arch_c_flags} ${warn_c_flags}" ) +set( CMAKE_CXX_FLAGS_INIT "${arch_c_flags} ${warn_c_flags}" ) diff --git a/cmake/arm64-windows-msvc.cmake b/cmake/arm64-windows-msvc.cmake new file mode 100644 index 000000000..c77631420 --- /dev/null +++ b/cmake/arm64-windows-msvc.cmake @@ -0,0 +1,6 @@ +set( CMAKE_SYSTEM_NAME Windows ) +set( CMAKE_SYSTEM_PROCESSOR arm64 ) + +set( target arm64-pc-windows-msvc ) +set( CMAKE_C_COMPILER_TARGET ${target} ) +set( CMAKE_CXX_COMPILER_TARGET ${target} ) diff --git a/common/log.h b/common/log.h index 6934c57b2..09fa63c26 100644 --- a/common/log.h +++ b/common/log.h @@ -211,7 +211,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std:: #define LOG_FLF_VAL , __FILE__, __LINE__, __FUNCTION__ #else #define LOG_FLF_FMT "[%24s:%5ld][%24s] " - #define LOG_FLF_VAL , __FILE__, __LINE__, 
__FUNCTION__ + #define LOG_FLF_VAL , __FILE__, (long)__LINE__, __FUNCTION__ #endif #else #define LOG_FLF_FMT "%s" @@ -224,7 +224,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std:: #define LOG_TEE_FLF_VAL , __FILE__, __LINE__, __FUNCTION__ #else #define LOG_TEE_FLF_FMT "[%24s:%5ld][%24s] " - #define LOG_TEE_FLF_VAL , __FILE__, __LINE__, __FUNCTION__ + #define LOG_TEE_FLF_VAL , __FILE__, (long)__LINE__, __FUNCTION__ #endif #else #define LOG_TEE_FLF_FMT "%s" @@ -294,7 +294,7 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std:: // Main LOG macro. // behaves like printf, and supports arguments the exact same way. // -#ifndef _MSC_VER +#if !defined(_MSC_VER) || defined(__clang__) #define LOG(...) LOG_IMPL(__VA_ARGS__, "") #else #define LOG(str, ...) LOG_IMPL("%s" str, "", ##__VA_ARGS__, "") @@ -308,14 +308,14 @@ inline std::string log_filename_generator_impl(LogTriState multilog, const std:: // Secondary target can be changed just like LOG_TARGET // by defining LOG_TEE_TARGET // -#ifndef _MSC_VER +#if !defined(_MSC_VER) || defined(__clang__) #define LOG_TEE(...) LOG_TEE_IMPL(__VA_ARGS__, "") #else #define LOG_TEE(str, ...) LOG_TEE_IMPL("%s" str, "", ##__VA_ARGS__, "") #endif // LOG macro variants with auto endline. -#ifndef _MSC_VER +#if !defined(_MSC_VER) || defined(__clang__) #define LOGLN(...) LOG_IMPL(__VA_ARGS__, "\n") #define LOG_TEELN(...) LOG_TEE_IMPL(__VA_ARGS__, "\n") #else diff --git a/ggml-quants.c b/ggml-quants.c index 9e62a3f32..f13599f6b 100644 --- a/ggml-quants.c +++ b/ggml-quants.c @@ -3487,10 +3487,9 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r #if defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q4_0 * restrict vx0 = vx; - const block_q4_0 * restrict vx1 = vx + bx; - + const block_q4_0 * restrict vx1 = (const block_q4_0 *) ((const uint8_t*)vx + bx); const block_q8_0 * restrict vy0 = vy; - const block_q8_0 * restrict vy1 = vy + by; + const block_q8_0 * restrict vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); float32x4_t sumv0 = vdupq_n_f32(0.0f); @@ -3524,10 +3523,12 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r const int8x16_t y1_l = vld1q_s8(b_y1->qs); const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; + float32_t _scale[4] = { GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; + + float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); @@ -3894,9 +3895,9 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r #if defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q4_1 * restrict vx0 = vx; - const block_q4_1 * restrict vx1 = vx + bx; + const block_q4_1 * restrict vx1 = (const block_q4_1 *) ((const uint8_t*)vx + bx); const block_q8_1 * restrict vy0 = vy; - const block_q8_1 * restrict vy1 = vy + by; + const block_q8_1 * restrict vy1 = (const block_q8_1 *) ((const uint8_t*)vy 
+ by); float32x4_t sumv0 = vdupq_n_f32(0.0f); float32x4_t summs0 = vdupq_n_f32(0.0f); @@ -3907,11 +3908,11 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r const block_q8_1 * restrict b_y0 = &vy0[i]; const block_q8_1 * restrict b_y1 = &vy1[i]; - float32x4_t summs_t = {GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s), - GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s), - GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s)}; - summs0 += summs_t; + float32_t summs_t[4] = {GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y0->s), + GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y0->s), + GGML_FP16_TO_FP32(b_x0->m) * GGML_FP16_TO_FP32(b_y1->s), + GGML_FP16_TO_FP32(b_x1->m) * GGML_FP16_TO_FP32(b_y1->s)}; + summs0 = vaddq_f32(summs0, vld1q_f32(summs_t)); const uint8x16_t m4b = vdupq_n_u8(0x0F); @@ -3931,10 +3932,11 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); // mmla into int32x4_t - float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*b_y0->d, - GGML_FP16_TO_FP32(b_x0->d)*b_y1->d, - GGML_FP16_TO_FP32(b_x1->d)*b_y0->d, - GGML_FP16_TO_FP32(b_x1->d)*b_y1->d}; + float32_t _scale[4] = {GGML_FP16_TO_FP32(b_x0->d)*b_y0->d, + GGML_FP16_TO_FP32(b_x0->d)*b_y1->d, + GGML_FP16_TO_FP32(b_x1->d)*b_y0->d, + GGML_FP16_TO_FP32(b_x1->d)*b_y1->d}; + float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); @@ -3953,7 +3955,7 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r float32x4_t sumv1 = vextq_f32(sumv0, sumv0, 2); float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1); - sumv2 = sumv2 + summs0; + sumv2 = vaddq_f32(sumv2, summs0); vst1_f32(s, vget_low_f32(sumv2)); vst1_f32(s + bs, vget_high_f32(sumv2)); @@ -4837,9 +4839,9 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r #if defined(__ARM_FEATURE_MATMUL_INT8) if (nrc == 2) { const block_q8_0 * restrict vx0 = vx; - const block_q8_0 * restrict vx1 = vx + bx; + const block_q8_0 * restrict vx1 = (const block_q8_0 *) ((const uint8_t*)vx + bx); const block_q8_0 * restrict vy0 = vy; - const block_q8_0 * restrict vy1 = vy + by; + const block_q8_0 * restrict vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by); float32x4_t sumv0 = vdupq_n_f32(0.0f); @@ -4861,10 +4863,11 @@ void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * r const int8x16_t y1_l = vld1q_s8(b_y1->qs); const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16); - float32x4_t scale = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), - GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; + float32_t _scale[4] = {GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x0->d)*GGML_FP16_TO_FP32(b_y1->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y0->d), + GGML_FP16_TO_FP32(b_x1->d)*GGML_FP16_TO_FP32(b_y1->d)}; + float32x4_t scale = vld1q_f32(_scale); int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l))); From 172b78210aae0e54d3668c5de14200efab9fac23 Mon 
Sep 17 00:00:00 2001 From: Max Krasnyansky Date: Wed, 15 May 2024 22:36:43 -0700 Subject: [PATCH 33/51] ci: fix bin/Release path for windows-arm64 builds (#7317) Switch to the Ninja Multi-Config CMake generator to resurrect the bin/Release path that broke artifact packaging in CI. --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2d2fea4a2..0742443c6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -712,9 +712,9 @@ jobs: - build: 'vulkan-x64' defines: '-DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DLLAMA_VULKAN=ON -DBUILD_SHARED_LIBS=ON' - build: 'llvm-arm64' - defines: '-G Ninja -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-llvm.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' - build: 'msvc-arm64' - defines: '-G Ninja -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' + defines: '-G "Ninja Multi-Config" -D CMAKE_TOOLCHAIN_FILE=cmake/arm64-windows-msvc.cmake -DLLAMA_NATIVE=OFF -DLLAMA_BUILD_SERVER=ON -DBUILD_SHARED_LIBS=ON' steps: - name: Clone From ad52d5c259344888b06fd5acd3344c663dd0621d Mon Sep 17 00:00:00 2001 From: Vaibhav Srivastav Date: Thu, 16 May 2024 07:38:43 +0200 Subject: [PATCH 34/51] doc: add references to hugging face GGUF-my-repo quantisation web tool. (#7288) * chore: add references to the quantisation space. * fix grammar lol. * Update README.md Co-authored-by: Julien Chaumond * Update README.md Co-authored-by: Georgi Gerganov --------- Co-authored-by: Julien Chaumond Co-authored-by: Georgi Gerganov --- README.md | 3 +++ examples/quantize/README.md | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index ecbe802df..5d6217d13 100644 --- a/README.md +++ b/README.md @@ -712,6 +712,9 @@ Building the program with BLAS support may lead to some performance improvements ### Prepare and Quantize +> [!NOTE] +> You can use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space on Hugging Face to quantise your model weights without any setup too. It is synced from `llama.cpp` main every 6 hours. + To obtain the official LLaMA 2 weights please see the Obtaining and using the Facebook LLaMA 2 model section. There is also a large selection of pre-quantized `gguf` models available on Hugging Face. Note: `convert.py` does not support LLaMA 3, you can use `convert-hf-to-gguf.py` with LLaMA 3 downloaded from Hugging Face. diff --git a/examples/quantize/README.md b/examples/quantize/README.md index 8a10365c0..b78ece4e7 100644 --- a/examples/quantize/README.md +++ b/examples/quantize/README.md @@ -1,6 +1,8 @@ # quantize -TODO +You can also use the [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space on Hugging Face to build your own quants without any setup. + +Note: It is synced from llama.cpp `main` every 6 hours.
## Llama 2 7B From 0350f5815218c483fb3026a86adc44a115481625 Mon Sep 17 00:00:00 2001 From: Herman Semenov Date: Thu, 16 May 2024 06:14:24 +0000 Subject: [PATCH 35/51] grammar, json, llama: replace push on emplace if it possible (#7273) --- common/grammar-parser.cpp | 2 +- common/json-schema-to-grammar.cpp | 12 ++++++------ llama.cpp | 4 ++-- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/common/grammar-parser.cpp b/common/grammar-parser.cpp index fecb7cd71..b5bc7d49b 100644 --- a/common/grammar-parser.cpp +++ b/common/grammar-parser.cpp @@ -26,7 +26,7 @@ namespace grammar_parser { static uint32_t get_symbol_id(parse_state & state, const char * src, size_t len) { uint32_t next_id = static_cast(state.symbol_ids.size()); - auto result = state.symbol_ids.insert(std::make_pair(std::string(src, len), next_id)); + auto result = state.symbol_ids.emplace(std::string(src, len), next_id); return result.first->second; } diff --git a/common/json-schema-to-grammar.cpp b/common/json-schema-to-grammar.cpp index 0f8f1b1d4..9a71f5d8d 100644 --- a/common/json-schema-to-grammar.cpp +++ b/common/json-schema-to-grammar.cpp @@ -272,7 +272,7 @@ private: if (literal.empty()) { return false; } - ret.push_back(std::make_pair(literal, true)); + ret.emplace_back(literal, true); literal.clear(); return true; }; @@ -298,7 +298,7 @@ private: while (i < length) { char c = sub_pattern[i]; if (c == '.') { - seq.push_back(std::make_pair(get_dot(), false)); + seq.emplace_back(get_dot(), false); i++; } else if (c == '(') { i++; @@ -307,7 +307,7 @@ private: _warnings.push_back("Unsupported pattern syntax"); } } - seq.push_back(std::make_pair("(" + to_rule(transform()) + ")", false)); + seq.emplace_back("(" + to_rule(transform()) + ")", false); } else if (c == ')') { i++; if (start > 0 && sub_pattern[start - 1] != '(') { @@ -331,9 +331,9 @@ private: } square_brackets += ']'; i++; - seq.push_back(std::make_pair(square_brackets, false)); + seq.emplace_back(square_brackets, false); } else if (c == '|') { - seq.push_back(std::make_pair("|", false)); + seq.emplace_back("|", false); i++; } else if (c == '*' || c == '+' || c == '?') { seq.back() = std::make_pair(to_rule(seq.back()) + c, false); @@ -417,7 +417,7 @@ private: } } if (!literal.empty()) { - seq.push_back(std::make_pair(literal, true)); + seq.emplace_back(literal, true); } } } diff --git a/llama.cpp b/llama.cpp index 7d26966e4..0ef756a52 100644 --- a/llama.cpp +++ b/llama.cpp @@ -17015,13 +17015,13 @@ static size_t llama_state_seq_get_data_internal(struct llama_context * ctx, llam } else { if (cell_range_begin != kv_self.size) { - cell_ranges.push_back({ cell_range_begin, i }); + cell_ranges.emplace_back(cell_range_begin, i); cell_range_begin = kv_self.size; } } } if (cell_range_begin != kv_self.size) { - cell_ranges.push_back({ cell_range_begin, kv_self.size }); + cell_ranges.emplace_back(cell_range_begin, kv_self.size); } // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count From dda64fc17c97820ea9489eb0cc9ae8b8fdce4926 Mon Sep 17 00:00:00 2001 From: Jared Van Bortel Date: Thu, 16 May 2024 02:15:23 -0400 Subject: [PATCH 36/51] convert : get general.name from model dir, not its parent (#5615) Co-authored-by: Brian --- convert.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert.py b/convert.py index e2e642351..da1247957 100755 --- a/convert.py +++ b/convert.py @@ -1109,7 +1109,7 @@ class OutputFile: if metadata is not None and metadata.name is not None: name = metadata.name elif params.path_model is not None: - 
name = str(params.path_model.parent).split("/")[-1] + name = params.path_model.name elif params.n_ctx == 4096: # Heuristic detection of LLaMA v2 model name = "LLaMA v2" From 3b3963c55c8332e33533c44b2aa882b0e45f8292 Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Wed, 15 May 2024 15:29:07 +0300 Subject: [PATCH 37/51] rpc : add command line arg for specifying backend memory ref: #7293 --- examples/rpc/README.md | 4 +-- examples/rpc/rpc-server.cpp | 68 +++++++++++++++++++++++++++++++------ ggml-rpc.cpp | 2 +- 3 files changed, 60 insertions(+), 14 deletions(-) diff --git a/examples/rpc/README.md b/examples/rpc/README.md index 325d0abc4..eeec71a8e 100644 --- a/examples/rpc/README.md +++ b/examples/rpc/README.md @@ -42,7 +42,7 @@ cmake --build . --config Release Then, start the `rpc-server` with the backend: ```bash -$ bin/rpc-server 0.0.0.0 50052 +$ bin/rpc-server -p 50052 create_backend: using CUDA backend ggml_cuda_init: GGML_CUDA_FORCE_MMQ: no ggml_cuda_init: CUDA_USE_TENSOR_CORES: yes @@ -53,7 +53,7 @@ Starting RPC server on 0.0.0.0:50052 When using the CUDA backend, you can specify the device with the `CUDA_VISIBLE_DEVICES` environment variable, e.g.: ```bash -$ CUDA_VISIBLE_DEVICES=0 bin/rpc-server 0.0.0.0 50052 +$ CUDA_VISIBLE_DEVICES=0 bin/rpc-server -p 50052 ``` This way you can run multiple `rpc-server` instances on the same host, each with a different CUDA device. diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 496af8496..021185b83 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -10,6 +10,52 @@ #include #include +struct rpc_server_params { + std::string host = "0.0.0.0"; + int port = 50052; + size_t backend_mem = 0; +}; + +static void print_usage(int /*argc*/, char ** argv, rpc_server_params params) { + fprintf(stderr, "Usage: %s [options]\n\n", argv[0]); + fprintf(stderr, "options:\n"); + fprintf(stderr, " -h, --help show this help message and exit\n"); + fprintf(stderr, " -H HOST, --host HOST host to bind to (default: %s)\n", params.host.c_str()); + fprintf(stderr, " -p PORT, --port PORT port to bind to (default: %d)\n", params.port); + fprintf(stderr, " -m MEM, --mem MEM backend memory size (in MB)\n"); + fprintf(stderr, "\n"); +} + +static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & params) { + std::string arg; + for (int i = 1; i < argc; i++) { + arg = argv[i]; + if (arg == "-H" || arg == "--host") { + if (++i >= argc) { + return false; + } + params.host = argv[i]; + } else if (arg == "-p" || arg == "--port") { + if (++i >= argc) { + return false; + } + params.port = std::stoi(argv[i]); + if (params.port <= 0 || params.port > 65535) { + return false; + } + } else if (arg == "-m" || arg == "--mem") { + if (++i >= argc) { + return false; + } + params.backend_mem = std::stoul(argv[i]) * 1024 * 1024; + } else if (arg == "-h" || arg == "--help") { + print_usage(argc, argv, params); + exit(0); + } + } + return true; +} + static ggml_backend_t create_backend() { ggml_backend_t backend = NULL; #ifdef GGML_USE_CUDA @@ -45,14 +91,9 @@ static void get_backend_memory(size_t * free_mem, size_t * total_mem) { } int main(int argc, char * argv[]) { - if (argc < 3) { - fprintf(stderr, "Usage: %s \n", argv[0]); - return 1; - } - const char * host = argv[1]; - int port = std::stoi(argv[2]); - if (port <= 0 || port > 65535) { - fprintf(stderr, "Invalid port number: %d\n", port); + rpc_server_params params; + if (!rpc_server_params_parse(argc, argv, params)) { + fprintf(stderr, "Invalid parameters\n"); 
return 1; } ggml_backend_t backend = create_backend(); @@ -60,10 +101,15 @@ int main(int argc, char * argv[]) { fprintf(stderr, "Failed to create backend\n"); return 1; } - printf("Starting RPC server on %s:%d\n", host, port); + std::string endpoint = params.host + ":" + std::to_string(params.port); size_t free_mem, total_mem; - get_backend_memory(&free_mem, &total_mem); - std::string endpoint = std::string(host) + ":" + std::to_string(port); + if (params.backend_mem > 0) { + free_mem = params.backend_mem; + total_mem = params.backend_mem; + } else { + get_backend_memory(&free_mem, &total_mem); + } + printf("Starting RPC server on %s, backend memory: %zu MB\n", endpoint.c_str(), free_mem / (1024 * 1024)); start_rpc_server(backend, endpoint.c_str(), free_mem, total_mem); ggml_backend_free(backend); return 0; diff --git a/ggml-rpc.cpp b/ggml-rpc.cpp index efeacb297..ba392009f 100644 --- a/ggml-rpc.cpp +++ b/ggml-rpc.cpp @@ -28,7 +28,7 @@ #define UNUSED GGML_UNUSED -#define GGML_DEBUG 1 +#define GGML_DEBUG 0 #if (GGML_DEBUG >= 1) #define GGML_PRINT_DEBUG(...) printf(__VA_ARGS__) #else From 9afdffe70ebf3166d429b4434783bb0b7f97bdeb Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Wed, 15 May 2024 16:04:40 +0300 Subject: [PATCH 38/51] rpc : get available mem for the CPU backend This can be overridden with the -m command line option ref: #7293 --- examples/rpc/rpc-server.cpp | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 021185b83..41f377376 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -7,6 +7,11 @@ #endif #include "ggml-rpc.h" +#ifdef _WIN32 +# include +#else +# include +#endif #include #include @@ -84,9 +89,18 @@ static void get_backend_memory(size_t * free_mem, size_t * total_mem) { #ifdef GGML_USE_CUDA ggml_backend_cuda_get_device_memory(0, free_mem, total_mem); #else - // TODO: implement for other backends - *free_mem = 1; - *total_mem = 1; + #ifdef _WIN32 + MEMORYSTATUSEX status; + status.dwLength = sizeof(status); + GlobalMemoryStatusEx(&status); + *total_mem = status.ullTotalPhys; + *free_mem = status.ullAvailPhys; + #else + long pages = sysconf(_SC_PHYS_PAGES); + long page_size = sysconf(_SC_PAGE_SIZE); + *total_mem = pages * page_size; + *free_mem = *total_mem; + #endif #endif } From 24ecb58168dce81646c2ed425690a106591c8c6d Mon Sep 17 00:00:00 2001 From: Pierrick Hymbert Date: Thu, 16 May 2024 20:43:45 +0200 Subject: [PATCH 39/51] Revert "server bench: fix bench not waiting for model load (#7284)" (#7334) This reverts commit 583fd6b000ec9ad1b465b5c98524f4a0ae388077. 
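For context: the check restored below only verifies that the server socket accepts TCP connections, whereas the reverted change polled the `/health` endpoint, which also reflects whether the model has finished loading. A minimal sketch of the two approaches (illustrative only, not part of this patch; the helper names are made up, and the `/health` endpoint and host/port handling follow the surrounding bench.py code):

```python
import socket

import requests

# Check restored by this revert: succeeds as soon as the port is listening,
# even if the model is still being loaded.
def is_port_listening(host: str, port: int) -> bool:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        return sock.connect_ex((host, port)) == 0

# Check removed by this revert: succeeds only once the server answers /health
# with HTTP 200, i.e. once it is actually ready to serve requests.
def is_server_healthy(host: str, port: int) -> bool:
    try:
        return requests.get(f"http://{host}:{port}/health").status_code == 200
    except requests.RequestException:
        return False
```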
--- examples/server/bench/bench.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/examples/server/bench/bench.py b/examples/server/bench/bench.py index 25ac29c4c..86c5de101 100644 --- a/examples/server/bench/bench.py +++ b/examples/server/bench/bench.py @@ -293,14 +293,13 @@ def start_server_background(args): def is_server_listening(server_fqdn, server_port): - try: - url = f"{server_fqdn}:{server_port}/health" - if not url.startswith("http://"): - url = f"http://{url}" - result = requests.get(url) - return result.status_code == 200 - except Exception: - return False + with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock: + result = sock.connect_ex((server_fqdn, server_port)) + _is_server_listening = result == 0 + if _is_server_listening: + print(f"server is listening on {server_fqdn}:{server_port}...") + return _is_server_listening + def escape_metric_name(metric_name): return re.sub('[^A-Z0-9]', '_', metric_name.upper()) From 9c4fdcbec8c7fcc428e723b0d8a1cf1f351ba642 Mon Sep 17 00:00:00 2001 From: Leon Knauer Date: Fri, 17 May 2024 02:11:03 +0200 Subject: [PATCH 40/51] [Server] Added --verbose option to README [no ci] (#7335) --- examples/server/README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/server/README.md b/examples/server/README.md index f6eb6942c..13cbdcd2c 100644 --- a/examples/server/README.md +++ b/examples/server/README.md @@ -17,7 +17,8 @@ The project is under active development, and we are [looking for feedback and co **Command line options:** -- `--threads N`, `-t N`: Set the number of threads to use during generation. Not used if model layers are offloaded to GPU. The server is using batching. This parameter is used only if one token is to be processed on CPU backend. +- `-v`, `--verbose`: Enable verbose server output. When using the `/completion` endpoint, this includes the tokenized prompt, the full request and the full response. +- `-t N`, `--threads N`: Set the number of threads to use during generation. Not used if model layers are offloaded to GPU. The server is using batching. This parameter is used only if one token is to be processed on CPU backend. - `-tb N, --threads-batch N`: Set the number of threads to use during batch and prompt processing. If not specified, the number of threads will be set to the number of threads used for generation. Not used if model layers are offloaded to GPU. - `--threads-http N`: Number of threads in the http server pool to process requests. Default: `max(std::thread::hardware_concurrency() - 1, --parallel N + 2)` - `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`). @@ -36,9 +37,7 @@ The project is under active development, and we are [looking for feedback and co - `--numa STRATEGY`: Attempt one of the below optimization strategies that may help on some NUMA systems - `--numa distribute`: Spread execution evenly over all nodes - `--numa isolate`: Only spawn threads on CPUs on the node that execution started on -- `--numa numactl`: Use the CPU map provided by numactl. If run without this previously, it is recommended to drop the system -page cache before using this. See https://github.com/ggerganov/llama.cpp/issues/1437 - +- `--numa numactl`: Use the CPU map provided by numactl. If run without this previously, it is recommended to drop the system page cache before using this. 
See https://github.com/ggerganov/llama.cpp/issues/1437 - `--numa`: Attempt optimizations that may help on some NUMA systems. - `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains. - `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation. From 934266c0e0b2aa9781fdba2deb112c161ff038a9 Mon Sep 17 00:00:00 2001 From: Justine Tunney Date: Fri, 17 May 2024 02:58:52 -0400 Subject: [PATCH 41/51] ggml : rewrite silu and softmax for cpu (#7154) This change upstreams llamafile's vectorized expf() functions. This lets us compute softmax and silu more accurately than the short[65536] lookup table that GGML previously used to make this operation go faster. We can support aarch64 and sse2+ with the worst case rounding error of 2ulp. It makes make -j8 tests && ./tests/test-backend-ops -o SOFT_MAX -b CPU perf go 1.5x faster for SSE2+FMA, 1.9x faster for AVX2+FMA and 2.1x on AVX512 --- ggml.c | 476 ++++++++++++++++++++++++++++++++++----------------------- 1 file changed, 283 insertions(+), 193 deletions(-) diff --git a/ggml.c b/ggml.c index 684d84235..55152bce4 100644 --- a/ggml.c +++ b/ggml.c @@ -165,9 +165,6 @@ void ggml_print_backtrace(void) { #define GGML_DEBUG 0 #define GGML_GELU_FP16 #define GGML_GELU_QUICK_FP16 -#define GGML_SILU_FP16 -// #define GGML_CROSS_ENTROPY_EXP_FP16 -// #define GGML_FLASH_ATTN_EXP_FP16 #define GGML_SOFT_MAX_UNROLL 4 #define GGML_VEC_DOT_UNROLL 2 @@ -318,12 +315,6 @@ static ggml_fp16_t ggml_table_gelu_f16[1 << 16]; // precomputed quick gelu table for f16 (128 KB) static ggml_fp16_t ggml_table_gelu_quick_f16[1 << 16]; -// precomputed silu table for f16 (128 KB) -static ggml_fp16_t ggml_table_silu_f16[1 << 16]; - -// precomputed exp table for f16 (128 KB) -static ggml_fp16_t ggml_table_exp_f16[1 << 16]; - // precomputed f32 table for f16 (256 KB) (ggml-impl.h) float ggml_table_f32_f16[1 << 16]; @@ -2085,52 +2076,291 @@ inline static float ggml_silu_f32(float x) { return x/(1.0f + expf(-x)); } -//inline static void ggml_vec_silu_f16(const int n, ggml_fp16_t * y, const ggml_fp16_t * x) { -// const uint16_t * i16 = (const uint16_t *) x; -// for (int i = 0; i < n; ++i) { -// y[i] = ggml_table_silu_f16[i16[i]]; -// } -//} +#if defined(__ARM_NEON) -#ifdef GGML_SILU_FP16 -inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) { - uint16_t t; - for (int i = 0; i < n; ++i) { - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); - memcpy(&t, &fp16, sizeof(uint16_t)); - y[i] = GGML_FP16_TO_FP32(ggml_table_silu_f16[t]); - } +// adapted from arm limited optimized routine +// the maximum error is 1.45358 plus 0.5 ulps +// numbers above 88.38 will flush to infinity +// numbers beneath -103.97 will flush to zero +inline static float32x4_t ggml_v_expf(float32x4_t x) { + const float32x4_t r = vdupq_n_f32(0x1.8p23f); + const float32x4_t z = vfmaq_f32(r, x, vdupq_n_f32(0x1.715476p+0f)); + const float32x4_t n = vsubq_f32(z, r); + const float32x4_t b = vfmsq_f32(vfmsq_f32(x, n, vdupq_n_f32(0x1.62e4p-1f)), n, + vdupq_n_f32(0x1.7f7d1cp-20f)); + const uint32x4_t e = vshlq_n_u32(vreinterpretq_u32_f32(z), 23); + const float32x4_t k = vreinterpretq_f32_u32(vaddq_u32(e, vreinterpretq_u32_f32(vdupq_n_f32(1)))); + const uint32x4_t c = vcagtq_f32(n, vdupq_n_f32(126)); + const float32x4_t u = vmulq_f32(b, b); + const float32x4_t j = 
vfmaq_f32( + vmulq_f32(vdupq_n_f32(0x1.ffffecp-1f), b), + vfmaq_f32(vfmaq_f32(vdupq_n_f32(0x1.fffdb6p-2f), vdupq_n_f32(0x1.555e66p-3f), b), + vfmaq_f32(vdupq_n_f32(0x1.573e2ep-5f), vdupq_n_f32(0x1.0e4020p-7f), b), u), u); + if (!vpaddd_u64(vreinterpretq_u64_u32(c))) + return vfmaq_f32(k, j, k); + const uint32x4_t d = vandq_u32(vclezq_f32(n), vdupq_n_u32(0x82000000)); + const float32x4_t s1 = vreinterpretq_f32_u32(vaddq_u32(d, vdupq_n_u32(0x7f000000))); + const float32x4_t s2 = vreinterpretq_f32_u32(vsubq_u32(e, d)); + return vbslq_f32(vcagtq_f32(n, vdupq_n_f32(192)), vmulq_f32(s1, s1), + vbslq_f32(c, vmulq_f32(vfmaq_f32(s2, s2, j), s1), vfmaq_f32(k, k, j))); } + +// computes silu x/(1+exp(-x)) in single precision vector +inline static float32x4_t ggml_v_silu(float32x4_t x) { + const float32x4_t one = vdupq_n_f32(1.0f); + const float32x4_t zero = vdupq_n_f32(0.0f); + const float32x4_t neg_x = vsubq_f32(zero, x); + const float32x4_t exp_neg_x = ggml_v_expf(neg_x); + const float32x4_t one_plus_exp_neg_x = vaddq_f32(one, exp_neg_x); + return vdivq_f32(x, one_plus_exp_neg_x); +} + +#elif defined(__AVX512F__) && defined(__AVX512DQ__) + +// adapted from arm limited optimized routine +// the maximum error is 1.45358 plus 0.5 ulps +// numbers above 88.38 will flush to infinity +// numbers beneath -103.97 will flush to zero +inline static __m512 ggml_v_expf(__m512 x) { + const __m512 r = _mm512_set1_ps(0x1.8p23f); + const __m512 z = _mm512_fmadd_ps(x, _mm512_set1_ps(0x1.715476p+0f), r); + const __m512 n = _mm512_sub_ps(z, r); + const __m512 b = _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.7f7d1cp-20f), + _mm512_fnmadd_ps(n, _mm512_set1_ps(0x1.62e4p-1f), x)); + const __m512i e = _mm512_slli_epi32(_mm512_castps_si512(z), 23); + const __m512 k = _mm512_castsi512_ps(_mm512_add_epi32(e, _mm512_castps_si512(_mm512_set1_ps(1)))); + const __mmask16 c = _mm512_cmp_ps_mask(_mm512_abs_ps(n), _mm512_set1_ps(126), _CMP_GT_OQ); + const __m512 u = _mm512_mul_ps(b, b); + const __m512 j = _mm512_fmadd_ps(_mm512_fmadd_ps(_mm512_fmadd_ps(_mm512_set1_ps(0x1.0e4020p-7f), b, + _mm512_set1_ps(0x1.573e2ep-5f)), u, + _mm512_fmadd_ps(_mm512_set1_ps(0x1.555e66p-3f), b, + _mm512_set1_ps(0x1.fffdb6p-2f))), + u, _mm512_mul_ps(_mm512_set1_ps(0x1.ffffecp-1f), b)); + if (_mm512_kortestz(c, c)) + return _mm512_fmadd_ps(j, k, k); + const __m512i g = _mm512_and_si512( + _mm512_movm_epi32(_mm512_cmp_ps_mask(n, _mm512_setzero_ps(), _CMP_LE_OQ)), + _mm512_set1_epi32(0x82000000u)); + const __m512 s1 = + _mm512_castsi512_ps(_mm512_add_epi32(g, _mm512_set1_epi32(0x7f000000u))); + const __m512 s2 = _mm512_castsi512_ps(_mm512_sub_epi32(e, g)); + const __mmask16 d = + _mm512_cmp_ps_mask(_mm512_abs_ps(n), _mm512_set1_ps(192), _CMP_GT_OQ); + return _mm512_mask_blend_ps( + d, _mm512_mask_blend_ps( + c, _mm512_fmadd_ps(k, j, k), + _mm512_mul_ps(_mm512_fmadd_ps(s2, j, s2), s1)), + _mm512_mul_ps(s1, s1)); +} + +// computes silu x/(1+exp(-x)) in single precision vector +inline static __m512 ggml_v_silu(__m512 x) { + const __m512 one = _mm512_set1_ps(1); + const __m512 zero = _mm512_setzero_ps(); + const __m512 neg_x = _mm512_sub_ps(zero, x); + const __m512 exp_neg_x = ggml_v_expf(neg_x); + const __m512 one_plus_exp_neg_x = _mm512_add_ps(one, exp_neg_x); + return _mm512_div_ps(x, one_plus_exp_neg_x); +} + +#elif defined(__AVX2__) && defined(__FMA__) + +// adapted from arm limited optimized routine +// the maximum error is 1.45358 plus 0.5 ulps +// numbers above 88.38 will flush to infinity +// numbers beneath -103.97 will flush to zero +inline static __m256 
ggml_v_expf(__m256 x) { + const __m256 r = _mm256_set1_ps(0x1.8p23f); + const __m256 z = _mm256_fmadd_ps(x, _mm256_set1_ps(0x1.715476p+0f), r); + const __m256 n = _mm256_sub_ps(z, r); + const __m256 b = _mm256_fnmadd_ps(n, _mm256_set1_ps(0x1.7f7d1cp-20f), + _mm256_fnmadd_ps(n, _mm256_set1_ps(0x1.62e4p-1f), x)); + const __m256i e = _mm256_slli_epi32(_mm256_castps_si256(z), 23); + const __m256 k = _mm256_castsi256_ps( + _mm256_add_epi32(e, _mm256_castps_si256(_mm256_set1_ps(1)))); + const __m256i c = _mm256_castps_si256( + _mm256_cmp_ps(_mm256_andnot_ps(_mm256_set1_ps(-0.f), n), + _mm256_set1_ps(126), _CMP_GT_OQ)); + const __m256 u = _mm256_mul_ps(b, b); + const __m256 j = _mm256_fmadd_ps(_mm256_fmadd_ps(_mm256_fmadd_ps(_mm256_set1_ps(0x1.0e4020p-7f), b, + _mm256_set1_ps(0x1.573e2ep-5f)), u, + _mm256_fmadd_ps(_mm256_set1_ps(0x1.555e66p-3f), b, + _mm256_set1_ps(0x1.fffdb6p-2f))), + u, _mm256_mul_ps(_mm256_set1_ps(0x1.ffffecp-1f), b)); + if (!_mm256_movemask_ps(_mm256_castsi256_ps(c))) + return _mm256_fmadd_ps(j, k, k); + const __m256i g = _mm256_and_si256( + _mm256_castps_si256(_mm256_cmp_ps(n, _mm256_setzero_ps(), _CMP_LE_OQ)), + _mm256_set1_epi32(0x82000000u)); + const __m256 s1 = + _mm256_castsi256_ps(_mm256_add_epi32(g, _mm256_set1_epi32(0x7f000000u))); + const __m256 s2 = _mm256_castsi256_ps(_mm256_sub_epi32(e, g)); + const __m256i d = _mm256_castps_si256( + _mm256_cmp_ps(_mm256_andnot_ps(_mm256_set1_ps(-0.f), n), + _mm256_set1_ps(192), _CMP_GT_OQ)); + return _mm256_or_ps( + _mm256_and_ps(_mm256_castsi256_ps(d), _mm256_mul_ps(s1, s1)), + _mm256_andnot_ps( + _mm256_castsi256_ps(d), + _mm256_or_ps( + _mm256_and_ps(_mm256_castsi256_ps(c), + _mm256_mul_ps(_mm256_fmadd_ps(s2, j, s2), s1)), + _mm256_andnot_ps(_mm256_castsi256_ps(c), _mm256_fmadd_ps(k, j, k))))); +} + +// computes silu x/(1+exp(-x)) in single precision vector +inline static __m256 ggml_v_silu(__m256 x) { + const __m256 one = _mm256_set1_ps(1); + const __m256 zero = _mm256_setzero_ps(); + const __m256 neg_x = _mm256_sub_ps(zero, x); + const __m256 exp_neg_x = ggml_v_expf(neg_x); + const __m256 one_plus_exp_neg_x = _mm256_add_ps(one, exp_neg_x); + return _mm256_div_ps(x, one_plus_exp_neg_x); +} + +#elif defined(__SSE2__) // __AVX2__ / __ARM_NEON + +#if defined(__FMA__) +#define MADD128(x, y, z) _mm_fmadd_ps(x, y, z) +#define NMADD128(x, y, z) _mm_fnmadd_ps(x, y, z) #else -inline static void ggml_vec_silu_f32(const int n, float * y, const float * x) { - for (int i = 0; i < n; ++i) { +#define MADD128(x, y, z) _mm_add_ps(_mm_mul_ps(x, y), z) +#define NMADD128(x, y, z) _mm_sub_ps(z, _mm_mul_ps(x, y)) +#endif + +// adapted from arm limited optimized routine +// the maximum error is 1.45358 plus 0.5 ulps +// numbers above 88.38 will flush to infinity +// numbers beneath -103.97 will flush to zero +inline static __m128 ggml_v_expf(__m128 x) { + const __m128 r = _mm_set1_ps(0x1.8p23f); + const __m128 z = MADD128(x, _mm_set1_ps(0x1.715476p+0f), r); + const __m128 n = _mm_sub_ps(z, r); + const __m128 b = + NMADD128(n, _mm_set1_ps(0x1.7f7d1cp-20f), NMADD128(n, _mm_set1_ps(0x1.62e4p-1f), x)); + const __m128i e = _mm_slli_epi32(_mm_castps_si128(z), 23); + const __m128 k = _mm_castsi128_ps(_mm_add_epi32(e, _mm_castps_si128(_mm_set1_ps(1)))); + const __m128i c = + _mm_castps_si128(_mm_cmpgt_ps(_mm_andnot_ps(_mm_set1_ps(-0.f), n), _mm_set1_ps(126))); + const __m128 u = _mm_mul_ps(b, b); + const __m128 j = + MADD128(MADD128(MADD128(_mm_set1_ps(0x1.0e4020p-7f), b, _mm_set1_ps(0x1.573e2ep-5f)), u, + MADD128(_mm_set1_ps(0x1.555e66p-3f), b, 
_mm_set1_ps(0x1.fffdb6p-2f))), + u, _mm_mul_ps(_mm_set1_ps(0x1.ffffecp-1f), b)); + if (!_mm_movemask_epi8(c)) + return MADD128(j, k, k); + const __m128i g = _mm_and_si128(_mm_castps_si128(_mm_cmple_ps(n, _mm_setzero_ps())), + _mm_set1_epi32(0x82000000u)); + const __m128 s1 = _mm_castsi128_ps(_mm_add_epi32(g, _mm_set1_epi32(0x7f000000u))); + const __m128 s2 = _mm_castsi128_ps(_mm_sub_epi32(e, g)); + const __m128i d = + _mm_castps_si128(_mm_cmpgt_ps(_mm_andnot_ps(_mm_set1_ps(-0.f), n), _mm_set1_ps(192))); + return _mm_or_ps( + _mm_and_ps(_mm_castsi128_ps(d), _mm_mul_ps(s1, s1)), + _mm_andnot_ps(_mm_castsi128_ps(d), + _mm_or_ps(_mm_and_ps(_mm_castsi128_ps(c), _mm_mul_ps(MADD128(s2, j, s2), s1)), + _mm_andnot_ps(_mm_castsi128_ps(c), MADD128(k, j, k))))); +} + +// computes silu x/(1+exp(-x)) in single precision vector +inline static __m128 ggml_v_silu(__m128 x) { + const __m128 one = _mm_set1_ps(1); + const __m128 zero = _mm_setzero_ps(); + const __m128 neg_x = _mm_sub_ps(zero, x); + const __m128 exp_neg_x = ggml_v_expf(neg_x); + const __m128 one_plus_exp_neg_x = _mm_add_ps(one, exp_neg_x); + return _mm_div_ps(x, one_plus_exp_neg_x); +} + +#endif // __ARM_NEON / __AVX2__ / __SSE2__ + +static void ggml_vec_silu_f32(const int n, float * y, const float * x) { + int i = 0; +#if defined(__AVX512F__) && defined(__AVX512DQ__) + for (; i + 15 < n; i += 16) { + _mm512_storeu_ps(y + i, ggml_v_silu(_mm512_loadu_ps(x + i))); + } +#elif defined(__AVX2__) && defined(__FMA__) + for (; i + 7 < n; i += 8) { + _mm256_storeu_ps(y + i, ggml_v_silu(_mm256_loadu_ps(x + i))); + } +#elif defined(__SSE2__) + for (; i + 3 < n; i += 4) { + _mm_storeu_ps(y + i, ggml_v_silu(_mm_loadu_ps(x + i))); + } +#elif defined(__ARM_NEON) + for (; i + 3 < n; i += 4) { + vst1q_f32(y + i, ggml_v_silu(vld1q_f32(x + i))); + } +#endif + for (; i < n; ++i) { y[i] = ggml_silu_f32(x[i]); } } + +static ggml_float ggml_vec_soft_max_f32(const int n, float * y, const float * x, float max) { + int i = 0; + ggml_float sum = 0; +#if defined(__AVX512F__) && defined(__AVX512DQ__) + for (; i + 15 < n; i += 16) { + __m512 val = ggml_v_expf(_mm512_sub_ps(_mm512_loadu_ps(x + i), + _mm512_set1_ps(max))); + _mm512_storeu_ps(y + i, val); + sum += (ggml_float)_mm512_reduce_add_ps(val); + } +#elif defined(__AVX2__) && defined(__FMA__) + for (; i + 7 < n; i += 8) { + __m256 val = ggml_v_expf(_mm256_sub_ps(_mm256_loadu_ps(x + i), + _mm256_set1_ps(max))); + _mm256_storeu_ps(y + i, val); + __m128 val2 = _mm_add_ps(_mm256_extractf128_ps(val, 1), + _mm256_castps256_ps128(val)); + val2 = _mm_add_ps(val2, _mm_movehl_ps(val2, val2)); + val2 = _mm_add_ss(val2, _mm_movehdup_ps(val2)); + sum += (ggml_float)_mm_cvtss_f32(val2); + } +#elif defined(__SSE2__) + for (; i + 3 < n; i += 4) { + __m128 val = ggml_v_expf(_mm_sub_ps(_mm_loadu_ps(x + i), + _mm_set1_ps(max))); + _mm_storeu_ps(y + i, val); +#if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) + val = _mm_add_ps(val, _mm_movehl_ps(val, val)); + val = _mm_add_ss(val, _mm_movehdup_ps(val)); +#else + __m128 tmp = _mm_shuffle_ps(val, val, _MM_SHUFFLE(2, 3, 0, 1)); + val = _mm_add_ps(val, tmp); + tmp = _mm_movehl_ps(tmp, val); + val = _mm_add_ss(val, tmp); #endif + sum += (ggml_float)_mm_cvtss_f32(val); + } +#elif defined(__ARM_NEON) + for (; i + 3 < n; i += 4) { + float32x4_t val = ggml_v_expf(vsubq_f32(vld1q_f32(x + i), + vdupq_n_f32(max))); + vst1q_f32(y + i, val); + sum += (ggml_float)vaddvq_f32(val); + } +#endif + for (; i < n; ++i) { + float val = expf(x[i] - max); + sum += (ggml_float)val; + y[i] = val; + 
} + return sum; +} inline static float ggml_silu_backward_f32(float x, float dy) { const float s = 1.0f/(1.0f + expf(-x)); return dy*s*(1.0f + x*(1.0f - s)); } -#ifdef GGML_SILU_FP16 -inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { - for (int i = 0; i < n; ++i) { - // we did not use x[i] to compute forward silu but its f16 equivalent - // take derivative at f16 of x[i]: - ggml_fp16_t fp16 = GGML_FP32_TO_FP16(x[i]); - float usedx = GGML_FP16_TO_FP32(fp16); - dx[i] = ggml_silu_backward_f32(usedx, dy[i]); - } -} -#else inline static void ggml_vec_silu_backward_f32(const int n, float * dx, const float * x, const float * dy) { for (int i = 0; i < n; ++i) { dx[i] = ggml_silu_backward_f32(x[i], dy[i]); } } -#endif inline static void ggml_vec_sum_f32(const int n, float * s, const float * x) { #ifndef GGML_USE_ACCELERATE @@ -2922,8 +3152,6 @@ struct ggml_context * ggml_init(struct ggml_init_params params) { float f = ggml_table_f32_f16[i] = GGML_COMPUTE_FP16_TO_FP32(u.fp16); ggml_table_gelu_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_f32(f)); ggml_table_gelu_quick_f16[i] = GGML_FP32_TO_FP16(ggml_gelu_quick_f32(f)); - ggml_table_silu_f16[i] = GGML_FP32_TO_FP16(ggml_silu_f32(f)); - ggml_table_exp_f16[i] = GGML_FP32_TO_FP16(expf(f)); } const uint64_t t_end = ggml_time_us(); UNUSED(t_end); @@ -13600,22 +13828,7 @@ static void ggml_compute_forward_soft_max_f32( float max = -INFINITY; ggml_vec_max_f32(nc, &max, wp); - ggml_float sum = 0.0; - - uint16_t scvt; - for (int i = 0; i < nc; i++) { - if (wp[i] == -INFINITY) { - dp[i] = 0.0f; - } else { - // const float val = (wp[i] == -INFINITY) ? 0.0 : exp(wp[i] - max); - ggml_fp16_t s = GGML_FP32_TO_FP16(wp[i] - max); - memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]); - sum += (ggml_float)val; - dp[i] = val; - } - } - + ggml_float sum = ggml_vec_soft_max_f32(nc, dp, wp, max); assert(sum > 0.0); sum = 1.0/sum; @@ -15374,37 +15587,7 @@ static void ggml_compute_forward_flash_attn_f32( vvexpf(S, S, &Mup); ggml_vec_sum_f32(Mup, &sum, S); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt); - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; - - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { - if (i >= masked_begin) { - break; - } - float * SS = S + i; - - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { - if (i + j >= masked_begin) { - break; - } else if (SS[j] == -INFINITY) { - SS[j] = 0.0f; - } else { -#ifndef GGML_FLASH_ATTN_EXP_FP16 - const float val = expf(SS[j] - max); -#else - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); - memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]); -#endif - sump[j] += (ggml_float)val; - SS[j] = val; - } - } - } - - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { - sum += sump[i]; - } + sum = ggml_vec_soft_max_f32(Mup, S, S, max); #endif } @@ -15586,28 +15769,7 @@ static void ggml_compute_forward_flash_attn_f16( vvexpf(S, S, &Mup); ggml_vec_sum_f32(Mup, &sum, S); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; - - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { - float * SS = S + i; - - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { - if (SS[j] == -INFINITY) { - SS[j] = 0.0f; - } else { - ggml_fp16_t s = GGML_FP32_TO_FP16(SS[j] - max); - memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]); - sump[j] += (ggml_float)val; - SS[j] = val; - } - } - } - - for 
(int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { - sum += sump[i]; - } + sum = ggml_vec_soft_max_f32(Mup, S, S, max); #endif } @@ -16234,38 +16396,7 @@ static void ggml_compute_forward_flash_attn_back_f32( vvexpf(SM, SM, &Mup); ggml_vec_sum_f32(Mup, &sum, SM); #else - uint16_t scvt[GGML_SOFT_MAX_UNROLL]; UNUSED(scvt); - ggml_float sump[GGML_SOFT_MAX_UNROLL] = { 0.0 }; - - for (int i = 0; i < Mup; i += GGML_SOFT_MAX_UNROLL) { - if (i >= masked_begin) { - break; - } - float * SR = S + i; - float * SW = SM + i; - - for (int j = 0; j < GGML_SOFT_MAX_UNROLL; ++j) { - if (i + j >= masked_begin) { - break; - } else if (SR[j] == -INFINITY) { - SW[j] = 0.0f; - } else { -#ifndef GGML_FLASH_ATTN_EXP_FP16 - const float val = expf(SR[j] - max); -#else - ggml_fp16_t s = GGML_FP32_TO_FP16(SR[j] - max); - memcpy(&scvt[j], &s, sizeof(uint16_t)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt[j]]); -#endif - sump[j] += (ggml_float)val; - SW[j] = val; - } - } - } - - for (int i = 0; i < GGML_SOFT_MAX_UNROLL; i++) { - sum += sump[i]; - } + sum = ggml_vec_soft_max_f32(Mup, SM, S, max); #endif } @@ -17291,35 +17422,15 @@ static void ggml_compute_forward_cross_entropy_loss_f32( assert(!isnan(s1[i])); } #endif + // soft_max - ggml_float sum = 0.0; - { - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - - uint16_t scvt; UNUSED(scvt); - for (int i = 0; i < nc; i++) { - if (s0[i] == -INFINITY) { - st[i] = 0.0f; - } else { -#ifndef GGML_CROSS_ENTROPY_EXP_FP16 - const float s = s0[i] - max; - const float val = expf(s); -#else - ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); - memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]); -#endif - sum += (ggml_float)val; - st[i] = val; - } - } - - assert(sum > 0.0); - // sum = 1.0/sum; - } - // avoid log(0) by rescaling from [0..1] to [eps..1] + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + ggml_float sum = ggml_vec_soft_max_f32(nc, st, s0, max); + assert(sum > 0.0); sum = (1.0 - eps) / sum; + + // avoid log(0) by rescaling from [0..1] to [eps..1] ggml_vec_scale_f32(nc, st, sum); ggml_vec_add1_f32(nc, st, st, eps); ggml_vec_log_f32(nc, st, st); @@ -17409,32 +17520,11 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32( #endif // soft_max - ggml_float sum = 0.0; - { - float max = -INFINITY; - ggml_vec_max_f32(nc, &max, s0); - - uint16_t scvt; UNUSED(scvt); - for (int i = 0; i < nc; i++) { - if (s0[i] == -INFINITY) { - ds0[i] = 0.0f; - } else { -#ifndef GGML_CROSS_ENTROPY_EXP_FP16 - const float s = s0[i] - max; - const float val = expf(s); -#else - ggml_fp16_t s = GGML_FP32_TO_FP16(s0[i] - max); - memcpy(&scvt, &s, sizeof(scvt)); - const float val = GGML_FP16_TO_FP32(ggml_table_exp_f16[scvt]); -#endif - sum += (ggml_float)val; - ds0[i] = val; - } - } - - assert(sum > 0.0); - sum = (1.0 - eps)/sum; - } + float max = -INFINITY; + ggml_vec_max_f32(nc, &max, s0); + ggml_float sum = ggml_vec_soft_max_f32(nc, ds0, s0, max); + assert(sum > 0.0); + sum = (1.0 - eps) / sum; // grad(src0) = (softmax(src0) - src1) * grad(cross_entropy_loss(src0, src1)) / nr ggml_vec_scale_f32(nc, ds0, sum); From ee94172d33399d2e814ca05c8a3ff8c523ebb093 Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Fri, 17 May 2024 10:00:17 +0300 Subject: [PATCH 42/51] server : add support for the RPC backend (#7305) ref: #7292 --- examples/server/server.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 7e0d068f8..b598076d3 100644 --- 
a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -2387,6 +2387,7 @@ static void server_print_usage(const char * argv0, const gpt_params & params, co printf(" --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n"); printf(" --host ip address to listen (default (default: %s)\n", sparams.hostname.c_str()); printf(" --port PORT port to listen (default (default: %d)\n", sparams.port); + printf(" --rpc SERVERS comma separated list of RPC servers\n"); printf(" --path PUBLIC_PATH path from which to serve static files (default: disabled)\n"); printf(" --api-key API_KEY optional api key to enhance server security. If set, requests must include this key for access.\n"); printf(" --api-key-file FNAME path to file containing api keys delimited by new lines. If set, requests must include one of the keys for access.\n"); @@ -2439,6 +2440,12 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams, break; } sparams.port = std::stoi(argv[i]); + } else if (arg == "--rpc") { + if (++i >= argc) { + invalid_param = true; + break; + } + params.rpc_servers = argv[i]; } else if (arg == "--host") { if (++i >= argc) { invalid_param = true; From e18bc6aaf3b547890609ed254ee5248e720e5840 Mon Sep 17 00:00:00 2001 From: amd-lalithnc Date: Fri, 17 May 2024 12:31:58 +0530 Subject: [PATCH 43/51] convert : fix Qwen/Qwen-7b conversion (#7308) --- convert-hf-to-gguf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index cd875fa4a..2810e1e41 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -526,7 +526,7 @@ class Model: # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined added_vocab = tokenizer.special_tokens - reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in (vocab | added_vocab).items()} + reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()} for i in range(vocab_size): if i not in reverse_vocab: From 359cbe3f46c90ce6f5151005e411b8fb74f8139e Mon Sep 17 00:00:00 2001 From: Herman Semenov Date: Fri, 17 May 2024 07:08:49 +0000 Subject: [PATCH 44/51] ggml-quants, llama : removed excess checks (#7274) --- common/common.cpp | 2 +- ggml-quants.c | 2 +- llama.cpp | 8 ++------ 3 files changed, 4 insertions(+), 8 deletions(-) diff --git a/common/common.cpp b/common/common.cpp index 96130ad54..e624fc7f3 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -2553,7 +2553,7 @@ void dump_string_yaml_multiline(FILE * stream, const char * prop_name, const cha size_t pos_start = 0; size_t pos_found = 0; - if (!data_str.empty() && (std::isspace(data_str[0]) || std::isspace(data_str.back()))) { + if (std::isspace(data_str[0]) || std::isspace(data_str.back())) { data_str = std::regex_replace(data_str, std::regex("\n"), "\\n"); data_str = std::regex_replace(data_str, std::regex("\""), "\\\""); data_str = std::regex_replace(data_str, std::regex(R"(\\[^n"])"), R"(\$&)"); diff --git a/ggml-quants.c b/ggml-quants.c index f13599f6b..9b291d522 100644 --- a/ggml-quants.c +++ b/ggml-quants.c @@ -1986,7 +1986,7 @@ static void quantize_row_q3_K_impl(const float * restrict x, block_q3_K * restri for (int j = 0; j < QK_K/16; ++j) { if (quant_weights) { - const float * qw = quant_weights ? 
quant_weights + QK_K * i + 16*j : NULL; + const float * qw = quant_weights + QK_K * i + 16*j; for (int l = 0; l < 16; ++l) weight[l] = qw[l] * sqrtf(sigma2 + x[16*j+l]*x[16*j+l]); } else { for (int l = 0; l < 16; ++l) weight[l] = x[16*j+l]*x[16*j+l]; diff --git a/llama.cpp b/llama.cpp index 0ef756a52..daaa138b9 100644 --- a/llama.cpp +++ b/llama.cpp @@ -13904,9 +13904,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_ // Sample the next word X using top-k sampling llama_sample_top_k(nullptr, candidates, int(k), 1); - if (ctx) { - ctx->t_sample_us += ggml_time_us() - t_start_sample_us; - } + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; llama_token X = llama_sample_token(ctx, candidates); t_start_sample_us = ggml_time_us(); @@ -13920,9 +13918,7 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_ // Update mu using the learning rate and error *mu = *mu - eta * e; - if (ctx) { - ctx->t_sample_us += ggml_time_us() - t_start_sample_us; - } + ctx->t_sample_us += ggml_time_us() - t_start_sample_us; return X; } From 29c60d8cddcfd14fa8a6bf023a6c4eb8692c76ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Fri, 17 May 2024 09:59:57 +0200 Subject: [PATCH 45/51] tokenization: add warning for double BOS (#7332) --- llama.cpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/llama.cpp b/llama.cpp index daaa138b9..c5a1fa0f6 100644 --- a/llama.cpp +++ b/llama.cpp @@ -12818,6 +12818,13 @@ static std::vector llama_tokenize_internal(const llama_vocab & } } + if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) { + LLAMA_LOG_WARN( + "%s: Added a BOS token to the prompt as specified by the model but the prompt " + "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. " + "Are you sure this is what you want?\n", __FUNCTION__); + } + if (add_special && vocab.special_add_eos == 1) { GGML_ASSERT(vocab.special_eos_id != -1); output.push_back(vocab.special_eos_id); @@ -12844,6 +12851,13 @@ static std::vector llama_tokenize_internal(const llama_vocab & } } + if (add_special && vocab.special_add_bos != 0 && output.size() >= 2 && output[1] == vocab.special_bos_id) { + LLAMA_LOG_WARN( + "%s: Added a BOS token to the prompt as specified by the model but the prompt " + "also starts with a BOS token. So now the final prompt starts with 2 BOS tokens. " + "Are you sure this is what you want?\n", __FUNCTION__); + } + if (add_special && vocab.special_add_eos == 1) { GGML_ASSERT(vocab.special_add_eos != -1); output.push_back(vocab.special_eos_id); From 27b040691cbe45314147c2745e891a38e9c048d4 Mon Sep 17 00:00:00 2001 From: fairydreaming <166155368+fairydreaming@users.noreply.github.com> Date: Fri, 17 May 2024 13:24:38 +0200 Subject: [PATCH 46/51] llama : use n_embd_head_v when reshaping kqv (#7327) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * llama : use n_embd_head_v instead of n_embd_head_k when reshaping kqv * llama : use n_embd_v_gqa and n_embd_head_v instead of n_embd_k_gqa and n_embd_head_k when making a view of cached value vectors. 
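The reason the V head size must be used here: per head, the attention output is softmax(QK^T)V, so its width is the V head dimension rather than the K head dimension. As a worked example with made-up sizes for a hypothetical model whose K and V heads differ:

$$
\operatorname{softmax}\!\left(Q K^{\top}\right) V \in \mathbb{R}^{\,n_{\text{tokens}} \times d_v},
\qquad d_k = \texttt{n\_embd\_head\_k}, \quad d_v = \texttt{n\_embd\_head\_v},
$$

so the merged kqv tensor holds $n_{\text{head}} \cdot d_v$ values per token. With hypothetical n_head = 32, n_embd_head_k = 192 and n_embd_head_v = 128, the reshape must use 32 * 128 = 4096, not 32 * 192 = 6144. When the two head sizes are equal the expressions coincide, which is why the previous code only went wrong on models where they differ.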
--------- Co-authored-by: Stanisław Szymczyk --- llama.cpp | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/llama.cpp b/llama.cpp index c5a1fa0f6..e11f0ac4b 100644 --- a/llama.cpp +++ b/llama.cpp @@ -6622,6 +6622,7 @@ static struct ggml_tensor * llm_build_kqv( const int64_t n_embd_head_k = hparams.n_embd_head_k; const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(); const int64_t n_embd_head_v = hparams.n_embd_head_v; + const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(); struct ggml_tensor * q = ggml_permute(ctx, q_cur, 0, 2, 1, 3); cb(q, "q", il); @@ -6644,8 +6645,8 @@ static struct ggml_tensor * llm_build_kqv( struct ggml_tensor * v = ggml_view_3d(ctx, kv.v_l[il], n_embd_head_v, n_kv, n_head_kv, - ggml_row_size(kv.v_l[il]->type, n_embd_k_gqa), - ggml_row_size(kv.v_l[il]->type, n_embd_head_k), + ggml_row_size(kv.v_l[il]->type, n_embd_v_gqa), + ggml_row_size(kv.v_l[il]->type, n_embd_head_v), 0); cb(v, "v", il); @@ -6655,7 +6656,7 @@ static struct ggml_tensor * llm_build_kqv( ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); } - cur = ggml_reshape_2d(ctx, cur, n_embd_head_k*n_head, n_tokens); + cur = ggml_reshape_2d(ctx, cur, n_embd_head_v*n_head, n_tokens); } else { struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q); cb(kq, "kq", il); @@ -6700,7 +6701,7 @@ static struct ggml_tensor * llm_build_kqv( struct ggml_tensor * kqv_merged = ggml_permute(ctx, kqv, 0, 2, 1, 3); cb(kqv_merged, "kqv_merged", il); - cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_k*n_head, n_tokens); + cur = ggml_cont_2d(ctx, kqv_merged, n_embd_head_v*n_head, n_tokens); cb(cur, "kqv_merged_cont", il); } From d273c1402b25086fd91aef2467ac13f2e49fa0ea Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Fri, 17 May 2024 15:11:45 +0300 Subject: [PATCH 47/51] py : convert-hf-to-gguf-update improvements (#7340) * convert-hf-to-gguf-update: automate updating * convert-hf-to-gguf-update: improve download * share requests session for performance * create directories only when needed, don't skip downloads when empty directory encountered * be more graceful about errors --- convert-hf-to-gguf-update.py | 87 ++++++++++++++++-------------------- convert-hf-to-gguf.py | 2 + 2 files changed, 41 insertions(+), 48 deletions(-) diff --git a/convert-hf-to-gguf-update.py b/convert-hf-to-gguf-update.py index 14aa0c45a..27983fadf 100755 --- a/convert-hf-to-gguf-update.py +++ b/convert-hf-to-gguf-update.py @@ -20,11 +20,13 @@ # - Update llama.cpp with the new pre-tokenizer if necessary # # TODO: generate tokenizer tests for llama.cpp -# TODO: automate the update of convert-hf-to-gguf.py # import logging import os +import pathlib +import re + import requests import sys import json @@ -35,6 +37,7 @@ from transformers import AutoTokenizer logging.basicConfig(level=logging.DEBUG) logger = logging.getLogger("convert-hf-to-gguf-update") +sess = requests.Session() class TOKENIZER_TYPE(IntEnum): @@ -79,63 +82,44 @@ models = [ {"name": "jina-v2-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", }, ] -# make directory "models/tokenizers" if it doesn't exist -if not os.path.exists("models/tokenizers"): - os.makedirs("models/tokenizers") - def download_file_with_auth(url, token, save_path): headers = {"Authorization": f"Bearer {token}"} - response = requests.get(url, headers=headers) - if response.status_code == 200: - with open(save_path, 'wb') as f: - f.write(response.content) - logger.info(f"File {save_path} downloaded successfully") - else: - logger.info(f"Failed to download 
file. Status code: {response.status_code}") + response = sess.get(url, headers=headers) + response.raise_for_status() + os.makedirs(os.path.dirname(save_path), exist_ok=True) + with open(save_path, 'wb') as f: + f.write(response.content) + logger.info(f"File {save_path} downloaded successfully") -# download the tokenizer models -for model in models: +def download_model(model): name = model["name"] repo = model["repo"] tokt = model["tokt"] - if not os.path.exists(f"models/tokenizers/{name}"): - os.makedirs(f"models/tokenizers/{name}") - else: - logger.info(f"Directory models/tokenizers/{name} already exists - skipping") - continue - - logger.info(f"Downloading {name} to models/tokenizers/{name}") - - url = f"{repo}/raw/main/config.json" - save_path = f"models/tokenizers/{name}/config.json" - download_file_with_auth(url, token, save_path) - - url = f"{repo}/raw/main/tokenizer.json" - save_path = f"models/tokenizers/{name}/tokenizer.json" - download_file_with_auth(url, token, save_path) - - # if downloaded file is less than 1KB, we likely need to download an LFS instead - if os.path.getsize(save_path) < 1024: - # remove the file - os.remove(save_path) - url = f"{repo}/resolve/main/tokenizer.json" - save_path = f"models/tokenizers/{name}/tokenizer.json" - download_file_with_auth(url, token, save_path) + os.makedirs(f"models/tokenizers/{name}", exist_ok=True) + files = ["config.json", "tokenizer.json", "tokenizer_config.json"] if tokt == TOKENIZER_TYPE.SPM: - url = f"{repo}/resolve/main/tokenizer.model" - save_path = f"models/tokenizers/{name}/tokenizer.model" - download_file_with_auth(url, token, save_path) + files.append("tokenizer.model") + + for file in files: + save_path = f"models/tokenizers/{name}/{file}" + if os.path.isfile(save_path): + logger.info(f"{name}: File {save_path} already exists - skipping") + continue + download_file_with_auth(f"{repo}/resolve/main/{file}", token, save_path) + + +for model in models: + try: + download_model(model) + except Exception as e: + logger.error(f"Failed to download model {model['name']}. Error: {e}") - url = f"{repo}/raw/main/tokenizer_config.json" - save_path = f"models/tokenizers/{name}/tokenizer_config.json" - download_file_with_auth(url, token, save_path) # generate the source code for the convert-hf-to-gguf.py:get_vocab_base_pre() function: -# TODO: auto-update convert-hf-to-gguf.py with the generated function src_ifs = "" for model in models: @@ -224,11 +208,18 @@ src_func = f""" return res """ -print(src_func) # noqa: NP100 +convert_py_pth = pathlib.Path("convert-hf-to-gguf.py") +convert_py = convert_py_pth.read_text() +convert_py = re.sub( + r"(# Marker: Start get_vocab_base_pre)(.+?)( +# Marker: End get_vocab_base_pre)", + lambda m: m.group(1) + src_func + m.group(3), + convert_py, + flags=re.DOTALL | re.MULTILINE, +) -logger.info("\n") -logger.info("!!! Copy-paste the function above into convert-hf-to-gguf.py !!!") -logger.info("\n") +convert_py_pth.write_text(convert_py) + +logger.info("+++ convert-hf-to-gguf.py was updated") # generate tests for each tokenizer model diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 2810e1e41..5ba3161c7 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -402,6 +402,7 @@ class Model: # NOTE: this function is generated by convert-hf-to-gguf-update.py # do not modify it manually! 
# ref: https://github.com/ggerganov/llama.cpp/pull/6920 + # Marker: Start get_vocab_base_pre def get_vocab_base_pre(self, tokenizer) -> str: # encoding this string and hashing the resulting tokens would (hopefully) give us a unique identifier that # is specific for the BPE pre-tokenizer used by the model @@ -489,6 +490,7 @@ class Model: logger.debug(f"chkhsh: {chkhsh}") return res + # Marker: End get_vocab_base_pre def _set_vocab_gpt2(self) -> None: tokens, toktypes, tokpre = self.get_vocab_base() From 51e9d02599336e62948d29f1d6c05addeb921ac2 Mon Sep 17 00:00:00 2001 From: Brian Date: Fri, 17 May 2024 22:40:14 +1000 Subject: [PATCH 48/51] Added a single test function script and fix debug-test.sh to be more robust (#7279) * run-single-test.sh: added a single test function script and fix debug-test.sh to be more robust * debug-test.sh: combined execute and gdb test mode via -g flag * debug-test.sh: refactor * debug-test: refactor for clarity * debug-test.sh: comment style changes * debug-test.sh: fix gdb --- docs/debugging-tests.md | 24 +++- scripts/debug-test.sh | 266 ++++++++++++++++++++++++++-------------- 2 files changed, 196 insertions(+), 94 deletions(-) diff --git a/docs/debugging-tests.md b/docs/debugging-tests.md index 51a125e19..18407f688 100644 --- a/docs/debugging-tests.md +++ b/docs/debugging-tests.md @@ -1,6 +1,6 @@ # Debugging Tests Tips -## How to run & debug a specific test without anything else to keep the feedback loop short? +## How to run & execute or debug a specific test without anything else to keep the feedback loop short? There is a script called debug-test.sh in the scripts folder whose parameter takes a REGEX and an optional test number. @@ -10,13 +10,27 @@ For example, running the following command will output an interactive list from It will then build & run in the debugger for you. +To just execute a test and get back a PASS or FAIL message run: + ```bash ./scripts/debug-test.sh test-tokenizer +``` + +To test in GDB use the `-g` flag to enable gdb test mode. + +```bash +./scripts/debug-test.sh -g test-tokenizer # Once in the debugger, i.e. at the chevrons prompt, setting a breakpoint could be as follows: >>> b main ``` +To speed up the testing loop, if you know your test number you can just run it similar to below: + +```bash +./scripts/debug-test.sh test 23 +``` + For further reference use `debug-test.sh -h` to print help.   @@ -41,7 +55,7 @@ cmake -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON .. make -j ``` -#### Step 3.1: Identify Test Command for Debugging +#### Step 3: Find all tests available that matches REGEX The output of this command will give you the command & arguments needed to run GDB. @@ -69,11 +83,13 @@ Labels: main ... 
``` -So for test #1 we can tell these two pieces of relevant information: +#### Step 4: Identify Test Command for Debugging + +So for test #1 above we can tell these two pieces of relevant information: * Test Binary: `~/llama.cpp/build-ci-debug/bin/test-tokenizer-0` * Test GGUF Model: `~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf` -#### Step 3.2: Run GDB on test command +#### Step 5: Run GDB on test command Based on the ctest 'test command' report above we can then run a gdb session via this command below: diff --git a/scripts/debug-test.sh b/scripts/debug-test.sh index 231a23d69..7b2b601a9 100755 --- a/scripts/debug-test.sh +++ b/scripts/debug-test.sh @@ -1,117 +1,203 @@ #!/bin/bash -test_suite=${1:-} -test_number=${2:-} PROG=${0##*/} build_dir="build-ci-debug" -if [ x"$1" = x"-h" ] || [ x"$1" = x"--help" ]; then - echo "Usage: $PROG [OPTION]... (test_number)" - echo "Debug specific ctest program." - echo - echo "Options:" - echo " -h, --help Display this help and exit" - echo - echo "Arguments:" - echo " (Mandatory) Supply one regex to the script to filter tests" - echo " (test_number) (Optional) Test number to run a specific test" - echo - echo "Example:" - echo " $PROG test-tokenizer" - echo " $PROG test-tokenizer 3" - echo - exit 0 -fi +# Print Color Commands +red=$(tput setaf 1) +green=$(tput setaf 2) +yellow=$(tput setaf 3) +blue=$(tput setaf 4) +magenta=$(tput setaf 5) +cyan=$(tput setaf 6) +normal=$(tput sgr0) -# Function to select and debug a test -function select_test() { - test_suite=${1:-test} - test_number=${2:-} - # Sanity Check If Tests Is Detected - printf "\n\nGathering tests that fit REGEX: ${test_suite} ...\n" - tests=($(ctest -R ${test_suite} -V -N | grep -E " +Test +#[0-9]+*" | cut -d':' -f2 | awk '{$1=$1};1')) - if [ ${#tests[@]} -eq 0 ] - then - echo "No tests avaliable... check your compliation process..." - echo "Exiting." - exit 1 - fi +# Print Help Message +#################### - if [ -z $test_number ] - then - # List out avaliable tests - printf "Which test would you like to debug?\n" - id=0 - for s in "${tests[@]}" - do - echo "Test# ${id}" - echo " $s" - ((id++)) - done +print_full_help() { + cat << EOF +Usage: $PROG [OPTION]... (test_number) +Debug specific ctest program. - # Prompt user which test they wanted to run - printf "\nRun test#? " - read test_number - else - printf "\nUser Already Requested #${test_number}" - fi +Options: + -h, --help display this help and exit + -g run in gdb mode - # Start GDB with the requested test binary and arguments - printf "Debugging(GDB) test: ${tests[test_number]}\n" - # Change IFS (Internal Field Separator) - sIFS=$IFS - IFS=$'\n' +Arguments: + (Mandatory) Supply one regex to the script to filter tests + (test_number) (Optional) Test number to run a specific test - # Get test args - gdb_args=($(ctest -R ${test_suite} -V -N | grep "Test command" | cut -d':' -f3 | awk '{$1=$1};1' )) - IFS=$sIFS - printf "Debug arguments: ${gdb_args[test_number]}\n\n" - - # Expand paths if needed - args=() - for x in $(echo ${gdb_args[test_number]} | sed -e 's/"\/\"//') - do - args+=($(echo $x | sed -e 's/.*\/..\//..\//')) - done - - # Execute debugger - echo "gdb args: ${args[@]}" - gdb --args ${args[@]} +Example: + $PROG test-tokenizer + $PROG test-tokenizer 3 +EOF } +abort() { + echo "Error: $1" >&2 + cat << EOF >&2 +Usage: $PROG [OPTION]... (test_number) +Debug specific ctest program. +Refer to --help for full instructions. 
+EOF + exit 1 +} + + +# Dependency Sanity Check +######################### + +check_dependency() { + command -v "$1" >/dev/null 2>&1 || { + abort "$1 is required but not found. Please install it and try again." + } +} + +check_dependency ctest +check_dependency cmake + + # Step 0: Check the args -if [ -z "$test_suite" ] -then - echo "Usage: $PROG [OPTION]... (test_number)" - echo "Supply one regex to the script to filter tests," - echo "and optionally a test number to run a specific test." - echo "Use --help flag for full instructions" - exit 1 +######################## + +if [ x"$1" = x"-h" ] || [ x"$1" = x"--help" ]; then + print_full_help >&2 + exit 0 fi +# Parse command-line options +gdb_mode=false +while getopts "g" opt; do + case $opt in + g) + gdb_mode=true + echo "gdb_mode Mode Enabled" + ;; + esac +done + +# Shift the option parameters +shift $((OPTIND - 1)) + +# Positionial Argument Processing : +if [ -z "${1}" ]; then + abort "Test regex is required" +else + test_suite=${1:-} +fi + +# Positionial Argument Processing : (test_number) +test_number=${2:-} + + # Step 1: Reset and Setup folder context +######################################## + ## Sanity check that we are actually in a git repo repo_root=$(git rev-parse --show-toplevel) if [ ! -d "$repo_root" ]; then - echo "Error: Not in a Git repository." - exit 1 + abort "Not in a Git repository." fi -## Reset folder to root context of git repo -pushd "$repo_root" || exit 1 +## Reset folder to root context of git repo and Create and enter build directory +pushd "$repo_root" +rm -rf "$build_dir" && mkdir "$build_dir" || abort "Failed to make $build_dir" -## Create and enter build directory -rm -rf "$build_dir" && mkdir "$build_dir" || exit 1 # Step 2: Setup Build Environment and Compile Test Binaries -cmake -B "./$build_dir" -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON || exit 1 -pushd "$build_dir" && make -j || exit 1 +########################################################### -# Step 3: Debug the Test -select_test "$test_suite" "$test_number" +# Note: test-eval-callback requires -DLLAMA_CURL +cmake -B "./$build_dir" -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_CURL=1 || abort "Failed to build enviroment" +pushd "$build_dir" +make -j || abort "Failed to compile" +popd > /dev/null || exit 1 -# Step 4: Return to the directory from which the user ran the command. -popd || exit 1 -popd || exit 1 -popd || exit 1 + +# Step 3: Find all tests available that matches REGEX +#################################################### + +# Ctest Gather Tests +# `-R test-tokenizer` : looks for all the test files named `test-tokenizer*` (R=Regex) +# `-N` : "show-only" disables test execution & shows test commands that you can feed to GDB. +# `-V` : Verbose Mode +printf "\n\nGathering tests that fit REGEX: ${test_suite} ...\n" +pushd "$build_dir" +tests=($(ctest -R ${test_suite} -V -N | grep -E " +Test +#[0-9]+*" | cut -d':' -f2 | awk '{$1=$1};1')) +if [ ${#tests[@]} -eq 0 ]; then + abort "No tests avaliable... check your compliation process..." +fi +popd > /dev/null || exit 1 + + +# Step 4: Identify Test Command for Debugging +############################################# + +# Select test number +if [ -z $test_number ]; then + # List out avaliable tests + printf "Which test would you like to debug?\n" + id=0 + for s in "${tests[@]}" + do + echo "Test# ${id}" + echo " $s" + ((id++)) + done + + # Prompt user which test they wanted to run + printf "\nRun test#? 
" + read test_number + +else + printf "\nUser Already Requested #${test_number}\n" + +fi + +# Grab all tests commands +pushd "$build_dir" +sIFS=$IFS # Save Initial IFS (Internal Field Separator) +IFS=$'\n' # Change IFS (Internal Field Separator) (So we split ctest output by newline rather than by spaces) +test_args=($(ctest -R ${test_suite} -V -N | grep "Test command" | cut -d':' -f3 | awk '{$1=$1};1' )) # Get test args +IFS=$sIFS # Reset IFS (Internal Field Separator) +popd > /dev/null || exit 1 + +# Grab specific test command +single_test_name="${tests[test_number]}" +single_test_command="${test_args[test_number]}" + + +# Step 5: Execute or GDB Debug +############################## + +printf "${magenta}Running Test #${test_number}: ${single_test_name}${normal}\n" +printf "${cyan}single_test_command: ${single_test_command}${normal}\n" + +if [ "$gdb_mode" = "true" ]; then + # Execute debugger + pushd "$repo_root" || exit 1 + eval "gdb --args ${single_test_command}" + popd > /dev/null || exit 1 + +else + # Execute Test + pushd "$repo_root" || exit 1 + eval "${single_test_command}" + exit_code=$? + popd > /dev/null || exit 1 + + # Print Result + printf "${blue}Ran Test #${test_number}: ${single_test_name}${normal}\n" + printf "${yellow}Command: ${single_test_command}${normal}\n" + if [ $exit_code -eq 0 ]; then + printf "${green}TEST PASS${normal}\n" + else + printf "${red}TEST FAIL${normal}\n" + fi + +fi + +# Return to the directory from which the user ran the command. +popd > /dev/null || exit 1 From f4bd8b3d260bb09491ba63c77ab7012b744362ef Mon Sep 17 00:00:00 2001 From: Radoslav Gerganov Date: Fri, 17 May 2024 17:25:44 +0300 Subject: [PATCH 49/51] rpc : set SO_REUSEADDR for the server socket (#7320) ref: #7293 --- examples/rpc/rpc-server.cpp | 4 ++++ ggml-rpc.cpp | 13 +++++++++++-- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/examples/rpc/rpc-server.cpp b/examples/rpc/rpc-server.cpp index 41f377376..7c15d2aa4 100644 --- a/examples/rpc/rpc-server.cpp +++ b/examples/rpc/rpc-server.cpp @@ -56,6 +56,10 @@ static bool rpc_server_params_parse(int argc, char ** argv, rpc_server_params & } else if (arg == "-h" || arg == "--help") { print_usage(argc, argv, params); exit(0); + } else { + fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); + print_usage(argc, argv, params); + exit(0); } } return true; diff --git a/ggml-rpc.cpp b/ggml-rpc.cpp index ba392009f..4a9bfa52d 100644 --- a/ggml-rpc.cpp +++ b/ggml-rpc.cpp @@ -134,7 +134,13 @@ static bool set_no_delay(sockfd_t sockfd) { int flag = 1; // set TCP_NODELAY to disable Nagle's algorithm int ret = setsockopt(sockfd, IPPROTO_TCP, TCP_NODELAY, (char *)&flag, sizeof(int)); - return ret >= 0; + return ret == 0; +} + +static bool set_reuse_addr(sockfd_t sockfd) { + int flag = 1; + int ret = setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (char *)&flag, sizeof(int)); + return ret == 0; } static std::shared_ptr socket_connect(const char * host, int port) { @@ -181,7 +187,10 @@ static std::shared_ptr create_server_socket(const char * host, int por if (sock == nullptr) { return nullptr; } - + if (!set_reuse_addr(sockfd)) { + fprintf(stderr, "Failed to set SO_REUSEADDR\n"); + return nullptr; + } struct sockaddr_in serv_addr; serv_addr.sin_family = AF_INET; serv_addr.sin_addr.s_addr = inet_addr(host); From 82ca83db3c8d45df559c03a4225b6eb34808a2db Mon Sep 17 00:00:00 2001 From: Gavin Zhao Date: Fri, 17 May 2024 11:03:03 -0400 Subject: [PATCH 50/51] ROCm: use native CMake HIP support (#5966) Supercedes #4024 and #4813. 
CMake's native HIP support has become the recommended way to add HIP code into a project (see [here](https://rocm.docs.amd.com/en/docs-6.0.0/conceptual/cmake-packages.html#using-hip-in-cmake)). This PR makes the following changes: 1. The environment variable `HIPCXX` or CMake option `CMAKE_HIP_COMPILER` should be used to specify the HIP compiler. Notably this shouldn't be `hipcc`, but ROCm's clang, which usually resides in `$ROCM_PATH/llvm/bin/clang`. Previously this was control by `CMAKE_C_COMPILER` and `CMAKE_CXX_COMPILER`. Note that since native CMake HIP support is not yet available on Windows, on Windows we fall back to the old behavior. 2. CMake option `CMAKE_HIP_ARCHITECTURES` is used to control the GPU architectures to build for. Previously this was controled by `GPU_TARGETS`. 3. Updated the Nix recipe to account for these new changes. 4. The GPU targets to build against in the Nix recipe is now consistent with the supported GPU targets in nixpkgs. 5. Added CI checks for HIP on both Linux and Windows. On Linux, we test both the new and old behavior. The most important part about this PR is the separation of the HIP compiler and the C/C++ compiler. This allows users to choose a different C/C++ compiler if desired, compared to the current situation where when building for ROCm support, everything must be compiled with ROCm's clang. ~~Makefile is unchanged. Please let me know if we want to be consistent on variables' naming because Makefile still uses `GPU_TARGETS` to control architectures to build for, but I feel like setting `CMAKE_HIP_ARCHITECTURES` is a bit awkward when you're calling `make`.~~ Makefile used `GPU_TARGETS` but the README says to use `AMDGPU_TARGETS`. For consistency with CMake, all usage of `GPU_TARGETS` in Makefile has been updated to `AMDGPU_TARGETS`. Thanks to the suggestion of @jin-eld, to maintain backwards compatibility (and not break too many downstream users' builds), if `CMAKE_CXX_COMPILER` ends with `hipcc`, then we still compile using the original behavior and emit a warning that recommends switching to the new HIP support. Similarly, if `AMDGPU_TARGETS` is set but `CMAKE_HIP_ARCHITECTURES` is not, then we forward `AMDGPU_TARGETS` to `CMAKE_HIP_ARCHITECTURES` to ease the transition to the new HIP support. Signed-off-by: Gavin Zhao --- .devops/nix/package.nix | 16 +++++----- .github/workflows/build.yml | 58 +++++++++++++++++++++++++++++++++++++ CMakeLists.txt | 42 ++++++++++++++++++++++----- Makefile | 6 ++-- README.md | 25 ++++++++++++---- 5 files changed, 122 insertions(+), 25 deletions(-) diff --git a/.devops/nix/package.nix b/.devops/nix/package.nix index 2c0ae4e2a..1c9633cdf 100644 --- a/.devops/nix/package.nix +++ b/.devops/nix/package.nix @@ -227,20 +227,20 @@ effectiveStdenv.mkDerivation ( ) ] ++ optionals useRocm [ - (cmakeFeature "CMAKE_C_COMPILER" "hipcc") - (cmakeFeature "CMAKE_CXX_COMPILER" "hipcc") - - # Build all targets supported by rocBLAS. When updating search for TARGET_LIST_ROCM - # in https://github.com/ROCmSoftwarePlatform/rocBLAS/blob/develop/CMakeLists.txt - # and select the line that matches the current nixpkgs version of rocBLAS. - # Should likely use `rocmPackages.clr.gpuTargets`. 
- "-DAMDGPU_TARGETS=gfx803;gfx900;gfx906:xnack-;gfx908:xnack-;gfx90a:xnack+;gfx90a:xnack-;gfx940;gfx941;gfx942;gfx1010;gfx1012;gfx1030;gfx1100;gfx1101;gfx1102" + (cmakeFeature "CMAKE_HIP_COMPILER" "${rocmPackages.llvm.clang}/bin/clang") + (cmakeFeature "CMAKE_HIP_ARCHITECTURES" (builtins.concatStringsSep ";" rocmPackages.clr.gpuTargets)) ] ++ optionals useMetalKit [ (lib.cmakeFeature "CMAKE_C_FLAGS" "-D__ARM_FEATURE_DOTPROD=1") (cmakeBool "LLAMA_METAL_EMBED_LIBRARY" (!precompileMetalShaders)) ]; + # Environment variables needed for ROCm + env = optionals useRocm { + ROCM_PATH = "${rocmPackages.clr}"; + HIP_DEVICE_LIB_PATH = "${rocmPackages.rocm-device-libs}/amdgcn/bitcode"; + }; + # TODO(SomeoneSerge): It's better to add proper install targets at the CMake level, # if they haven't been added yet. postInstall = '' diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0742443c6..0109cc004 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -392,6 +392,33 @@ jobs: cmake -DLLAMA_VULKAN=ON .. cmake --build . --config Release -j $(nproc) + ubuntu-22-cmake-hip: + runs-on: ubuntu-22.04 + container: rocm/dev-ubuntu-22.04:6.0.2 + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v3 + + - name: Dependencies + id: depends + run: | + sudo apt-get update + sudo apt-get install -y build-essential git cmake rocblas-dev hipblas-dev + + - name: Build with native CMake HIP support + id: cmake_build + run: | + cmake -B build -S . -DCMAKE_HIP_COMPILER="$(hipconfig -l)/clang" -DLLAMA_HIPBLAS=ON + cmake --build build --config Release -j $(nproc) + + - name: Build with legacy HIP support + id: cmake_build_legacy_hip + run: | + cmake -B build2 -S . -DCMAKE_C_COMPILER=hipcc -DCMAKE_CXX_COMPILER=hipcc -DLLAMA_HIPBLAS=ON + cmake --build build2 --config Release -j $(nproc) + ubuntu-22-cmake-sycl: runs-on: ubuntu-22.04 @@ -989,6 +1016,37 @@ jobs: path: llama-${{ steps.tag.outputs.name }}-bin-win-sycl-x64.zip name: llama-bin-win-sycl-x64.zip + windows-latest-cmake-hip: + runs-on: windows-latest + + steps: + - name: Clone + id: checkout + uses: actions/checkout@v3 + + - name: Install + id: depends + run: | + $ErrorActionPreference = "Stop" + write-host "Downloading AMD HIP SDK Installer" + Invoke-WebRequest -Uri "https://download.amd.com/developer/eula/rocm-hub/AMD-Software-PRO-Edition-23.Q4-WinSvr2022-For-HIP.exe" -OutFile "${env:RUNNER_TEMP}\rocm-install.exe" + write-host "Installing AMD HIP SDK" + Start-Process "${env:RUNNER_TEMP}\rocm-install.exe" -ArgumentList '-install' -NoNewWindow -Wait + write-host "Completed AMD HIP SDK installation" + + - name: Verify ROCm + id: verify + run: | + & 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' --version + + - name: Build + id: cmake_build + run: | + $env:HIP_PATH=$(Resolve-Path 'C:\Program Files\AMD\ROCm\*\bin\clang.exe' | split-path | split-path) + $env:CMAKE_PREFIX_PATH="${env:HIP_PATH}" + cmake -G "Unix Makefiles" -B build -S . 
-DCMAKE_C_COMPILER="${env:HIP_PATH}\bin\clang.exe" -DCMAKE_CXX_COMPILER="${env:HIP_PATH}\bin\clang++.exe" -DLLAMA_HIPBLAS=ON + cmake --build build --config Release + ios-xcode-build: runs-on: macos-latest diff --git a/CMakeLists.txt b/CMakeLists.txt index 8ab6a45a6..990e34b86 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -555,16 +555,37 @@ if (LLAMA_VULKAN) endif() if (LLAMA_HIPBLAS) - list(APPEND CMAKE_PREFIX_PATH /opt/rocm) + if ($ENV{ROCM_PATH}) + set(ROCM_PATH $ENV{ROCM_PATH}) + else() + set(ROCM_PATH /opt/rocm) + endif() + list(APPEND CMAKE_PREFIX_PATH ${ROCM_PATH}) - if (NOT ${CMAKE_C_COMPILER_ID} MATCHES "Clang") - message(WARNING "Only LLVM is supported for HIP, hint: CC=/opt/rocm/llvm/bin/clang") + # CMake on Windows doesn't support the HIP language yet + if(WIN32) + set(CXX_IS_HIPCC TRUE) + else() + string(REGEX MATCH "hipcc(\.bat)?$" CXX_IS_HIPCC "${CMAKE_CXX_COMPILER}") endif() - if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") - message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++") - endif() + if(CXX_IS_HIPCC) + if(LINUX) + if (NOT ${CMAKE_CXX_COMPILER_ID} MATCHES "Clang") + message(WARNING "Only LLVM is supported for HIP, hint: CXX=/opt/rocm/llvm/bin/clang++") + endif() + message(WARNING "Setting hipcc as the C++ compiler is legacy behavior." + " Prefer setting the HIP compiler directly. See README for details.") + endif() + else() + # Forward AMDGPU_TARGETS to CMAKE_HIP_ARCHITECTURES. + if(AMDGPU_TARGETS AND NOT CMAKE_HIP_ARCHITECTURES) + set(CMAKE_HIP_ARCHITECTURES ${AMDGPU_ARGETS}) + endif() + cmake_minimum_required(VERSION 3.21) + enable_language(HIP) + endif() find_package(hip REQUIRED) find_package(hipblas REQUIRED) find_package(rocblas REQUIRED) @@ -598,13 +619,18 @@ if (LLAMA_HIPBLAS) add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y}) add_compile_definitions(K_QUANTS_PER_ITERATION=${LLAMA_CUDA_KQUANTS_ITER}) - set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX) + if (CXX_IS_HIPCC) + set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE CXX) + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} hip::device) + else() + set_source_files_properties(${GGML_SOURCES_ROCM} PROPERTIES LANGUAGE HIP) + endif() if (LLAMA_STATIC) message(FATAL_ERROR "Static linking not supported for HIP/ROCm") endif() - set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} hip::device PUBLIC hip::host roc::rocblas roc::hipblas) + set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} PUBLIC hip::host roc::rocblas roc::hipblas) endif() if (LLAMA_SYCL) diff --git a/Makefile b/Makefile index 3fa56d13a..22d521856 100644 --- a/Makefile +++ b/Makefile @@ -560,10 +560,10 @@ endif # LLAMA_VULKAN ifdef LLAMA_HIPBLAS ifeq ($(wildcard /opt/rocm),) ROCM_PATH ?= /usr - GPU_TARGETS ?= $(shell $(shell which amdgpu-arch)) + AMDGPU_TARGETS ?= $(shell $(shell which amdgpu-arch)) else ROCM_PATH ?= /opt/rocm - GPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch) + AMDGPU_TARGETS ?= $(shell $(ROCM_PATH)/llvm/bin/amdgpu-arch) endif HIPCC ?= $(CCACHE) $(ROCM_PATH)/bin/hipcc LLAMA_CUDA_DMMV_X ?= 32 @@ -575,7 +575,7 @@ ifdef LLAMA_HIP_UMA endif # LLAMA_HIP_UMA MK_LDFLAGS += -L$(ROCM_PATH)/lib -Wl,-rpath=$(ROCM_PATH)/lib MK_LDFLAGS += -lhipblas -lamdhip64 -lrocblas - HIPFLAGS += $(addprefix --offload-arch=,$(GPU_TARGETS)) + HIPFLAGS += $(addprefix --offload-arch=,$(AMDGPU_TARGETS)) HIPFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X) HIPFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y) HIPFLAGS += -DK_QUANTS_PER_ITERATION=$(LLAMA_CUDA_KQUANTS_ITER) diff --git a/README.md 
b/README.md index 5d6217d13..7dd6fc0eb 100644 --- a/README.md +++ b/README.md @@ -528,13 +528,28 @@ Building the program with BLAS support may lead to some performance improvements ``` - Using `CMake` for Linux (assuming a gfx1030-compatible AMD GPU): ```bash - CC=/opt/rocm/llvm/bin/clang CXX=/opt/rocm/llvm/bin/clang++ \ - cmake -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \ + HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -R)" \ + cmake -S . -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \ && cmake --build build --config Release -- -j 16 ``` On Linux it is also possible to use unified memory architecture (UMA) to share main memory between the CPU and integrated GPU by setting `-DLLAMA_HIP_UMA=ON`. However, this hurts performance for non-integrated GPUs (but enables working with integrated GPUs). + Note that if you get the following error: + ``` + clang: error: cannot find ROCm device library; provide its path via '--rocm-path' or '--rocm-device-lib-path', or pass '-nogpulib' to build without ROCm device library + ``` + Try searching for a directory under `HIP_PATH` that contains the file + `oclc_abi_version_400.bc`. Then, add the following to the start of the + command: `HIP_DEVICE_LIB_PATH=`, so something + like: + ```bash + HIPCXX="$(hipconfig -l)/clang" HIP_PATH="$(hipconfig -p)" \ + HIP_DEVICE_LIB_PATH= \ + cmake -S . -B build -DLLAMA_HIPBLAS=ON -DAMDGPU_TARGETS=gfx1030 -DCMAKE_BUILD_TYPE=Release \ + && cmake --build build -- -j 16 + ``` + - Using `make` (example for target gfx1030, build with 16 CPU threads): ```bash make -j16 LLAMA_HIPBLAS=1 LLAMA_HIP_UMA=1 AMDGPU_TARGETS=gfx1030 @@ -543,10 +558,8 @@ Building the program with BLAS support may lead to some performance improvements - Using `CMake` for Windows (using x64 Native Tools Command Prompt for VS, and assuming a gfx1100-compatible AMD GPU): ```bash set PATH=%HIP_PATH%\bin;%PATH% - mkdir build - cd build - cmake -G Ninja -DAMDGPU_TARGETS=gfx1100 -DLLAMA_HIPBLAS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release .. - cmake --build . + cmake -S . -B build -G Ninja -DAMDGPU_TARGETS=gfx1100 -DLLAMA_HIPBLAS=ON -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DCMAKE_BUILD_TYPE=Release + cmake --build build ``` Make sure that `AMDGPU_TARGETS` is set to the GPU arch you want to compile for. The above example uses `gfx1100` that corresponds to Radeon RX 7900XTX/XT/GRE. You can find a list of targets [here](https://llvm.org/docs/AMDGPUUsage.html#processors) Find your gpu version string by matching the most significant version information from `rocminfo | grep gfx | head -1 | awk '{print $2}'` with the list of processors, e.g. `gfx1035` maps to `gfx1030`. 
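In short, with native CMake HIP support the HIP compiler and the GPU architectures are configured through CMake's own HIP variables instead of through hipcc. A minimal sketch of such a configure step, where the clang path and gfx targets are placeholders to be replaced with the ROCm clang and architectures for your system:

```bash
# Select the HIP compiler via the HIPCXX environment variable (placeholder path)
# and the GPU architectures via CMake's native CMAKE_HIP_ARCHITECTURES variable.
HIPCXX=/opt/rocm/llvm/bin/clang \
cmake -S . -B build -DLLAMA_HIPBLAS=ON \
      -DCMAKE_HIP_ARCHITECTURES="gfx1030;gfx1100" \
      -DCMAKE_BUILD_TYPE=Release

# Equivalent, using the CMake option instead of the environment variable:
# cmake -S . -B build -DLLAMA_HIPBLAS=ON \
#       -DCMAKE_HIP_COMPILER=/opt/rocm/llvm/bin/clang \
#       -DCMAKE_HIP_ARCHITECTURES="gfx1030;gfx1100" \
#       -DCMAKE_BUILD_TYPE=Release

cmake --build build --config Release -- -j 16
```

Setting `AMDGPU_TARGETS` is still intended to work: the new CMake logic forwards it to `CMAKE_HIP_ARCHITECTURES` when the latter is unset.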
From 0fc1e820a9900a3dd08ddd3c6abe6604c53b689b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Johannes=20G=C3=A4=C3=9Fler?= Date: Fri, 17 May 2024 18:54:52 +0200 Subject: [PATCH 51/51] CUDA: faster large batch FA without tensor cores (#7314) --- ggml-cuda/fattn-tile-f16.cu | 395 +++++++++++++++++++++++++++++++++++ ggml-cuda/fattn-tile-f16.cuh | 3 + ggml-cuda/fattn-tile-f32.cu | 393 ++++++++++++++++++++++++++++++++++ ggml-cuda/fattn-tile-f32.cuh | 3 + ggml-cuda/fattn-vec-f16.cu | 9 +- ggml-cuda/fattn-vec-f32.cu | 9 +- ggml-cuda/fattn.cu | 26 ++- 7 files changed, 823 insertions(+), 15 deletions(-) create mode 100644 ggml-cuda/fattn-tile-f16.cu create mode 100644 ggml-cuda/fattn-tile-f16.cuh create mode 100644 ggml-cuda/fattn-tile-f32.cu create mode 100644 ggml-cuda/fattn-tile-f32.cuh diff --git a/ggml-cuda/fattn-tile-f16.cu b/ggml-cuda/fattn-tile-f16.cu new file mode 100644 index 000000000..d2a1077ed --- /dev/null +++ b/ggml-cuda/fattn-tile-f16.cu @@ -0,0 +1,395 @@ +#include "common.cuh" +#include "fattn-common.cuh" +#include "fattn-tile-f16.cuh" + +#define FATTN_KQ_STRIDE_TILE_F16 64 + +template // D == head size +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(nwarps*WARP_SIZE, 1) +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_tile_ext_f16( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int ne0, + const int ne1, + const int ne2, + const int ne3) { +#if FP16_AVAILABLE + //In this kernel Q, K, V are matrices while i, j, k are matrix indices. + + const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. + const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + + const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. + const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) mask + ne11*ic0; + + const int stride_KV2 = nb11 / sizeof(half2); + + half slopeh = __float2half(1.0f); + + // ALiBi + if (max_bias > 0.0f) { + const uint32_t h = blockIdx.y; + + const float base = h < n_head_log2 ? m0 : m1; + const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; + + slopeh = __float2half(powf(base, exph)); + } + + static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); + + __shared__ half KQ[ncols*FATTN_KQ_STRIDE_TILE_F16]; + half2 * KQ2 = (half2 *) KQ; + + __shared__ half2 KV_tmp[FATTN_KQ_STRIDE_TILE_F16][D/2 + 1]; // Pad D to avoid memory bank conflicts. 
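// Annotation (not part of the patch): each outer iteration processes
// FATTN_KQ_STRIDE_TILE_F16 K/V rows. KV_tmp stages the K tile (and later the V tile)
// in shared memory, KQ holds the resulting ncols x FATTN_KQ_STRIDE_TILE_F16 block of
// attention scores, and the extra +1 column of padding keeps the threads of a warp
// out of the same shared memory bank. The per-thread arrays declared next carry the
// running softmax maximum (kqmax), the running denominator (kqsum) and the V-weighted
// accumulator (VKQ) of the online-softmax formulation, so the full KQ matrix is never
// written to global memory.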
+ + half kqmax[ncols/nwarps]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + kqmax[j0/nwarps] = -HALF_MAX_HALF; + } + half2 kqsum[ncols/nwarps] = {{0.0f, 0.0f}}; + + half2 VKQ[ncols/nwarps][(D/2)/WARP_SIZE] = {{{0.0f, 0.0f}}}; + + // Convert Q to half2 and store in registers: + __shared__ half2 Q_h2[ncols][D/2]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + const float2 tmp = Q_f2[j*(nb01/sizeof(float2)) + i]; + Q_h2[j][i] = make_half2(scale, scale) * make_half2(tmp.x, tmp.y); + } + } + + __syncthreads(); + + const int k_start = parallel_blocks == 1 ? 0 : ip*FATTN_KQ_STRIDE_TILE_F16; + for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE_TILE_F16) { + // Calculate KQ tile and keep track of new maximum KQ values: + + half kqmax_new[ncols/nwarps]; +#pragma unroll + for (int j = 0; j < ncols/nwarps; ++j) { + kqmax_new[j] = kqmax[j]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F16; i_KQ_0 += nwarps) { + const int i_KQ = i_KQ_0 + threadIdx.y; + +#pragma unroll + for (int k_KQ_0 = 0; k_KQ_0 < D/2; k_KQ_0 += WARP_SIZE) { + const int k_KQ = k_KQ_0 + threadIdx.x; + + KV_tmp[i_KQ][k_KQ] = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ]; + } + } + + __syncthreads(); + + half2 sum2[FATTN_KQ_STRIDE_TILE_F16/WARP_SIZE][ncols/nwarps] = {{{0.0f, 0.0f}}}; + +#pragma unroll + for (int k_KQ = 0; k_KQ < D/2; ++k_KQ) { + half2 K_k[FATTN_KQ_STRIDE_TILE_F16/WARP_SIZE]; + half2 Q_k[ncols/nwarps]; + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F16; i_KQ_0 += WARP_SIZE) { + const int i_KQ = i_KQ_0 + threadIdx.x; + + K_k[i_KQ_0/WARP_SIZE] = KV_tmp[i_KQ][k_KQ]; + } +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + const int j_KQ = j_KQ_0 + threadIdx.y; + + Q_k[j_KQ_0/nwarps] = Q_h2[j_KQ][k_KQ]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F16; i_KQ_0 += WARP_SIZE) { +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + sum2[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps] += K_k[i_KQ_0/WARP_SIZE]*Q_k[j_KQ_0/nwarps]; + } + } + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F16; i_KQ_0 += WARP_SIZE) { + const int i_KQ = i_KQ_0 + threadIdx.x; + +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + const int j_KQ = j_KQ_0 + threadIdx.y; + + half sum = __low2half(sum2[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps]) + __high2half(sum2[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps]); + sum += mask ? 
slopeh*maskh[j_KQ*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f); + + kqmax_new[j_KQ_0/nwarps] = ggml_cuda_hmax(kqmax_new[j_KQ_0/nwarps], sum); + + KQ[j_KQ*FATTN_KQ_STRIDE_TILE_F16 + i_KQ] = sum; + } + } + + __syncthreads(); + +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + kqmax_new[j0/nwarps] = warp_reduce_max(kqmax_new[j0/nwarps]); + const half2 KQ_max_scale = __half2half2(hexp(kqmax[j0/nwarps] - kqmax_new[j0/nwarps])); + kqmax[j0/nwarps] = kqmax_new[j0/nwarps]; + +#pragma unroll + for (int i0 = 0; i0 < FATTN_KQ_STRIDE_TILE_F16/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + const half2 diff = KQ2[j*(FATTN_KQ_STRIDE_TILE_F16/2) + i] - __half2half2(kqmax[j0/nwarps]); + const half2 val = h2exp(diff); + kqsum[j0/nwarps] = kqsum[j0/nwarps]*KQ_max_scale + val; + KQ2[j*(FATTN_KQ_STRIDE_TILE_F16/2) + i] = val; + } + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + VKQ[j0/nwarps][i0/WARP_SIZE] *= KQ_max_scale; + } + } + + __syncthreads(); + +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE_TILE_F16; k0 += nwarps) { + const int k = k0 + threadIdx.y; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + KV_tmp[k][i] = V_h2[(k_VKQ_0 + k)*stride_KV2 + i]; + } + } + + __syncthreads(); + +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE_TILE_F16; k0 += 2) { + half2 V_k[(D/2)/WARP_SIZE][2]; + half2 KQ_k[ncols/nwarps]; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + V_k[i0/WARP_SIZE][0] = KV_tmp[k0 + 0][i]; + V_k[i0/WARP_SIZE][1] = KV_tmp[k0 + 1][i]; + } +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + KQ_k[j0/nwarps] = KQ2[j*(FATTN_KQ_STRIDE_TILE_F16/2) + k0/2]; + } + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + VKQ[j0/nwarps][i0/WARP_SIZE] += V_k[i0/WARP_SIZE][0]* __low2half2(KQ_k[j0/nwarps]); + VKQ[j0/nwarps][i0/WARP_SIZE] += V_k[i0/WARP_SIZE][1]*__high2half2(KQ_k[j0/nwarps]); + } + } + } + + __syncthreads(); + } + +#pragma unroll + for (int j_VKQ_0 = 0; j_VKQ_0 < ncols; j_VKQ_0 += nwarps) { + const int j_VKQ = j_VKQ_0 + threadIdx.y; + + half kqsum_j = __low2half(kqsum[j_VKQ_0/nwarps]) + __high2half(kqsum[j_VKQ_0/nwarps]); + kqsum_j = warp_reduce_sum(kqsum_j); + +#pragma unroll + for (int i00 = 0; i00 < D; i00 += 2*WARP_SIZE) { + const int i0 = i00 + 2*threadIdx.x; + + half2 dst_val = VKQ[j_VKQ_0/nwarps][i0/(2*WARP_SIZE)]; + if (parallel_blocks == 1) { + dst_val /= __half2half2(kqsum_j); + } + const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; + dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 0] = __low2float(dst_val); + dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 1] = __high2float(dst_val); + } + + if (parallel_blocks != 1 && threadIdx.x == 0) { + dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); + } + } +#else + NO_DEVICE_CODE; +#endif // FP16_AVAILABLE +} + +template void launch_fattn_tile_f16( + const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V, ggml_tensor * KQV, const ggml_tensor * mask, + ggml_cuda_pool & pool, cudaStream_t main_stream +) { + ggml_cuda_pool_alloc dst_tmp(pool); + ggml_cuda_pool_alloc dst_tmp_meta(pool); + + if (parallel_blocks > 1) { + dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV)); + dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV)); + } + + 
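// Annotation (not part of the patch): when parallel_blocks > 1, each Q column is
// split across that many thread blocks along the K/V sequence. Every block writes an
// unnormalized partial result into dst_tmp and its (running max, running sum) pair
// into dst_tmp_meta, and flash_attn_combine_results merges the partials afterwards.
// With parallel_blocks == 1 the kernel normalizes in-place and writes directly into
// KQV->data, so neither temporary buffer is allocated.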
constexpr int nwarps = 8; + const dim3 block_dim(WARP_SIZE, nwarps, 1); + const dim3 blocks_num(parallel_blocks*((Q->ne[1] + cols_per_block - 1) / cols_per_block), Q->ne[2], Q->ne[3]); + const int shmem = 0; + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (float *) KQV->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) KQV->op_params + 1, sizeof(float)); + + const uint32_t n_head = Q->ne[2]; + const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); + + const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + flash_attn_tile_ext_f16 + <<>> ( + (const char *) Q->data, + (const char *) K->data, + (const char *) V->data, + mask ? ((const char *) mask->data) : nullptr, + parallel_blocks == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr, + scale, max_bias, m0, m1, n_head_log2, + Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3], + K->ne[0], K->ne[1], K->ne[2], K->ne[3], + mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0, + Q->nb[1], Q->nb[2], Q->nb[3], + K->nb[1], K->nb[2], K->nb[3], + KQV->ne[0], KQV->ne[1], KQV->ne[2], KQV->ne[3] + ); + CUDA_CHECK(cudaGetLastError()); + + if (parallel_blocks == 1) { + return; + } + + const dim3 block_dim_combine(D, 1, 1); + const dim3 blocks_num_combine(Q->ne[1], blocks_num.y, blocks_num.z); + const int shmem_combine = 0; + + flash_attn_combine_results + <<>> + (dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data); + CUDA_CHECK(cudaGetLastError()); +} + +void ggml_cuda_flash_attn_ext_tile_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; + const ggml_tensor * K = dst->src[1]; + const ggml_tensor * V = dst->src[2]; + + const ggml_tensor * mask = dst->src[3]; + + ggml_tensor * KQV = dst; + + const int32_t precision = KQV->op_params[2]; + GGML_ASSERT(precision == GGML_PREC_DEFAULT); + GGML_ASSERT(Q->ne[0] == 64 || Q->ne[0] == 128 && "FlashAttention without tensor cores only supports head sizes 64 and 128."); + + if (Q->ne[1] <= 16) { + constexpr int cols_per_block = 16; + constexpr int parallel_blocks = 4; + switch (Q->ne[0]) { + case 64: + launch_fattn_tile_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + case 128: + launch_fattn_tile_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + default: + GGML_ASSERT(false); + break; + } + return; + } + + if (Q->ne[1] <= 32) { + constexpr int cols_per_block = 32; + constexpr int parallel_blocks = 4; + switch (Q->ne[0]) { + case 64: + launch_fattn_tile_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + case 128: + launch_fattn_tile_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + default: + GGML_ASSERT(false); + break; + } + return; + } + + constexpr int cols_per_block = 32; + constexpr int parallel_blocks = 1; + switch (Q->ne[0]) { + case 64: + launch_fattn_tile_f16< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + case 128: + launch_fattn_tile_f16<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + default: + GGML_ASSERT(false); + break; + } +} diff --git a/ggml-cuda/fattn-tile-f16.cuh b/ggml-cuda/fattn-tile-f16.cuh new file mode 100644 index 000000000..ffc587842 --- /dev/null +++ b/ggml-cuda/fattn-tile-f16.cuh @@ -0,0 +1,3 @@ +#include "common.cuh" + +void 
ggml_cuda_flash_attn_ext_tile_f16(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml-cuda/fattn-tile-f32.cu b/ggml-cuda/fattn-tile-f32.cu new file mode 100644 index 000000000..176895edd --- /dev/null +++ b/ggml-cuda/fattn-tile-f32.cu @@ -0,0 +1,393 @@ +#include "common.cuh" +#include "fattn-common.cuh" +#include "fattn-tile-f32.cuh" + +#define FATTN_KQ_STRIDE_TILE_F32 32 + +template // D == head size +#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +__launch_bounds__(nwarps*WARP_SIZE, 1) +#endif // !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) +static __global__ void flash_attn_tile_ext_f32( + const char * __restrict__ Q, + const char * __restrict__ K, + const char * __restrict__ V, + const char * __restrict__ mask, + float * __restrict__ dst, + float2 * __restrict__ dst_meta, + const float scale, + const float max_bias, + const float m0, + const float m1, + const uint32_t n_head_log2, + const int ne00, + const int ne01, + const int ne02, + const int ne03, + const int ne10, + const int ne11, + const int ne12, + const int ne13, + const int ne31, + const int nb31, + const int nb01, + const int nb02, + const int nb03, + const int nb11, + const int nb12, + const int nb13, + const int ne0, + const int ne1, + const int ne2, + const int ne3) { + //In this kernel Q, K, V are matrices while i, j, k are matrix indices. + + const int ic0 = (blockIdx.x / parallel_blocks) * ncols; // Index of the Q/QKV column to work on. + const int ip = blockIdx.x % parallel_blocks; // Index in group of blocks running for the same column in parallel. + + const int gqa_ratio = ne02 / ne12; // With grouped query attention there are > 1 Q matrices per K, V matrix. + const float2 * Q_f2 = (const float2 *) (Q + nb02* blockIdx.y + nb01*ic0); + const half2 * K_h2 = (const half2 *) (K + nb12*(blockIdx.y / gqa_ratio)); + const half2 * V_h2 = (const half2 *) (V + nb12*(blockIdx.y / gqa_ratio)); // K and V have same shape + const half * maskh = (const half *) mask + ne11*ic0; + + const int stride_KV2 = nb11 / sizeof(half2); + + float slope = 1.0f; + + // ALiBi + if (max_bias > 0.0f) { + const uint32_t h = blockIdx.y; + + const float base = h < n_head_log2 ? m0 : m1; + const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; + + slope = powf(base, exph); + } + + static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64."); + + __shared__ float KQ[ncols*FATTN_KQ_STRIDE_TILE_F32]; + + __shared__ float KV_tmp[FATTN_KQ_STRIDE_TILE_F32][D + 1]; // Pad D to avoid memory bank conflicts. + float2 * KV_tmp2 = (float2 *) KV_tmp; + + float kqmax[ncols/nwarps]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + kqmax[j0/nwarps] = -FLT_MAX/2.0f; + } + float kqsum[ncols/nwarps] = {0.0f}; + + float2 VKQ[ncols/nwarps][(D/2)/WARP_SIZE] = {{{0.0f, 0.0f}}}; + + // Convert Q to half2 and store in registers: + __shared__ float Q_f[ncols][D]; +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + +#pragma unroll + for (int i0 = 0; i0 < D; i0 += 2*WARP_SIZE) { + float2 tmp = Q_f2[j*(nb01/sizeof(float2)) + i0/2 + threadIdx.x]; + Q_f[j][i0 + 0*WARP_SIZE + threadIdx.x] = tmp.x * scale; + Q_f[j][i0 + 1*WARP_SIZE + threadIdx.x] = tmp.y * scale; + } + } + + __syncthreads(); + + const int k_start = parallel_blocks == 1 ? 
0 : ip*FATTN_KQ_STRIDE_TILE_F32; + for (int k_VKQ_0 = k_start; k_VKQ_0 < ne11; k_VKQ_0 += parallel_blocks*FATTN_KQ_STRIDE_TILE_F32) { + // Calculate KQ tile and keep track of new maximum KQ values: + + float kqmax_new[ncols/nwarps]; +#pragma unroll + for (int j = 0; j < ncols/nwarps; ++j) { + kqmax_new[j] = kqmax[j]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F32; i_KQ_0 += nwarps) { + const int i_KQ = i_KQ_0 + threadIdx.y; + +#pragma unroll + for (int k_KQ_0 = 0; k_KQ_0 < D; k_KQ_0 += 2*WARP_SIZE) { + const half2 tmp = K_h2[(k_VKQ_0 + i_KQ)*stride_KV2 + k_KQ_0/2 + threadIdx.x]; + KV_tmp[i_KQ][k_KQ_0 + 0*WARP_SIZE + threadIdx.x] = __low2float(tmp); + KV_tmp[i_KQ][k_KQ_0 + 1*WARP_SIZE + threadIdx.x] = __high2float(tmp); + } + } + + __syncthreads(); + + float sum[FATTN_KQ_STRIDE_TILE_F32/WARP_SIZE][ncols/nwarps] = {{0.0f}}; + +#pragma unroll + for (int k_KQ = 0; k_KQ < D; ++k_KQ) { + float K_k[FATTN_KQ_STRIDE_TILE_F32/WARP_SIZE]; + float Q_k[ncols/nwarps]; + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F32; i_KQ_0 += WARP_SIZE) { + const int i_KQ = i_KQ_0 + threadIdx.x; + + K_k[i_KQ_0/WARP_SIZE] = KV_tmp[i_KQ][k_KQ]; + } +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + const int j_KQ = j_KQ_0 + threadIdx.y; + + Q_k[j_KQ_0/nwarps] = Q_f[j_KQ][k_KQ]; + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F32; i_KQ_0 += WARP_SIZE) { +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + sum[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps] += K_k[i_KQ_0/WARP_SIZE] * Q_k[j_KQ_0/nwarps]; + } + } + } + +#pragma unroll + for (int i_KQ_0 = 0; i_KQ_0 < FATTN_KQ_STRIDE_TILE_F32; i_KQ_0 += WARP_SIZE) { + const int i_KQ = i_KQ_0 + threadIdx.x; + +#pragma unroll + for (int j_KQ_0 = 0; j_KQ_0 < ncols; j_KQ_0 += nwarps) { + const int j_KQ = j_KQ_0 + threadIdx.y; + + sum[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps] += mask ? 
slope*__half2float(maskh[j_KQ*ne11 + k_VKQ_0 + i_KQ]) : 0.0f; + + kqmax_new[j_KQ_0/nwarps] = fmaxf(kqmax_new[j_KQ_0/nwarps], sum[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps]); + + KQ[j_KQ*FATTN_KQ_STRIDE_TILE_F32 + i_KQ] = sum[i_KQ_0/WARP_SIZE][j_KQ_0/nwarps]; + } + } + + __syncthreads(); + +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + kqmax_new[j0/nwarps] = warp_reduce_max(kqmax_new[j0/nwarps]); + const float KQ_max_scale = expf(kqmax[j0/nwarps] - kqmax_new[j0/nwarps]); + kqmax[j0/nwarps] = kqmax_new[j0/nwarps]; + + float kqsum_add = 0.0f; +#pragma unroll + for (int i0 = 0; i0 < FATTN_KQ_STRIDE_TILE_F32; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + const float diff = KQ[j*FATTN_KQ_STRIDE_TILE_F32 + i] - kqmax[j0/nwarps]; + const float val = expf(diff); + kqsum_add += val; + KQ[j*FATTN_KQ_STRIDE_TILE_F32 + i] = val; + } + kqsum[j0/nwarps] = kqsum[j0/nwarps]*KQ_max_scale + kqsum_add; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + VKQ[j0/nwarps][i0/WARP_SIZE].x *= KQ_max_scale; + VKQ[j0/nwarps][i0/WARP_SIZE].y *= KQ_max_scale; + } + } + + __syncthreads(); + +#pragma unroll + for (int k0 = 0; k0 < FATTN_KQ_STRIDE_TILE_F32; k0 += nwarps) { + const int k = k0 + threadIdx.y; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + KV_tmp2[k*(D/2) + i].x = __low2float(V_h2[(k_VKQ_0 + k)*stride_KV2 + i]); + KV_tmp2[k*(D/2) + i].y = __high2float(V_h2[(k_VKQ_0 + k)*stride_KV2 + i]); + } + } + + __syncthreads(); + +#pragma unroll + for (int k = 0; k < FATTN_KQ_STRIDE_TILE_F32; ++k) { + float2 V_k[(D/2)/WARP_SIZE]; + float KQ_k[ncols/nwarps]; + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { + const int i = i0 + threadIdx.x; + + V_k[i0/WARP_SIZE] = KV_tmp2[k*(D/2) + i]; + } +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + const int j = j0 + threadIdx.y; + + KQ_k[j0/nwarps] = KQ[j*FATTN_KQ_STRIDE_TILE_F32 + k]; + } + +#pragma unroll + for (int i0 = 0; i0 < D/2; i0 += WARP_SIZE) { +#pragma unroll + for (int j0 = 0; j0 < ncols; j0 += nwarps) { + VKQ[j0/nwarps][i0/WARP_SIZE].x += V_k[i0/WARP_SIZE].x*KQ_k[j0/nwarps]; + VKQ[j0/nwarps][i0/WARP_SIZE].y += V_k[i0/WARP_SIZE].y*KQ_k[j0/nwarps]; + } + } + } + + __syncthreads(); + } + +#pragma unroll + for (int j_VKQ_0 = 0; j_VKQ_0 < ncols; j_VKQ_0 += nwarps) { + const int j_VKQ = j_VKQ_0 + threadIdx.y; + + float kqsum_j = kqsum[j_VKQ_0/nwarps]; + kqsum_j = warp_reduce_sum(kqsum_j); + +#pragma unroll + for (int i00 = 0; i00 < D; i00 += 2*WARP_SIZE) { + const int i0 = i00 + 2*threadIdx.x; + + float2 dst_val = VKQ[j_VKQ_0/nwarps][i0/(2*WARP_SIZE)]; + if (parallel_blocks == 1) { + dst_val.x /= kqsum_j; + dst_val.y /= kqsum_j; + } + const int j_dst = (ic0 + j_VKQ)*parallel_blocks + ip; + dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 0] = dst_val.x; + dst[j_dst*D*gridDim.y + D*blockIdx.y + i0 + 1] = dst_val.y; + } + + if (parallel_blocks != 1 && threadIdx.x == 0) { + dst_meta[(ic0 + j_VKQ)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j_VKQ_0/nwarps], kqsum_j); + } + } +} + +template void launch_fattn_tile_f32( + const ggml_tensor * Q, const ggml_tensor * K, const ggml_tensor * V, ggml_tensor * KQV, const ggml_tensor * mask, + ggml_cuda_pool & pool, cudaStream_t main_stream +) { + ggml_cuda_pool_alloc dst_tmp(pool); + ggml_cuda_pool_alloc dst_tmp_meta(pool); + + if (parallel_blocks > 1) { + dst_tmp.alloc(parallel_blocks*ggml_nelements(KQV)); + 
dst_tmp_meta.alloc(parallel_blocks*ggml_nrows(KQV)); + } + + constexpr int nwarps = 8; + const dim3 block_dim(WARP_SIZE, nwarps, 1); + const dim3 blocks_num(parallel_blocks*((Q->ne[1] + cols_per_block - 1) / cols_per_block), Q->ne[2], Q->ne[3]); + const int shmem = 0; + + float scale = 1.0f; + float max_bias = 0.0f; + + memcpy(&scale, (float *) KQV->op_params + 0, sizeof(float)); + memcpy(&max_bias, (float *) KQV->op_params + 1, sizeof(float)); + + const uint32_t n_head = Q->ne[2]; + const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head)); + + const float m0 = powf(2.0f, -(max_bias ) / n_head_log2); + const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2); + + flash_attn_tile_ext_f32 + <<>> ( + (const char *) Q->data, + (const char *) K->data, + (const char *) V->data, + mask ? ((const char *) mask->data) : nullptr, + parallel_blocks == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr, + scale, max_bias, m0, m1, n_head_log2, + Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3], + K->ne[0], K->ne[1], K->ne[2], K->ne[3], + mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0, + Q->nb[1], Q->nb[2], Q->nb[3], + K->nb[1], K->nb[2], K->nb[3], + KQV->ne[0], KQV->ne[1], KQV->ne[2], KQV->ne[3] + ); + CUDA_CHECK(cudaGetLastError()); + + if (parallel_blocks == 1) { + return; + } + + const dim3 block_dim_combine(D, 1, 1); + const dim3 blocks_num_combine(Q->ne[1], blocks_num.y, blocks_num.z); + const int shmem_combine = 0; + + flash_attn_combine_results + <<>> + (dst_tmp.ptr, dst_tmp_meta.ptr, (float *) KQV->data); + CUDA_CHECK(cudaGetLastError()); +} + +void ggml_cuda_flash_attn_ext_tile_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst) { + const ggml_tensor * Q = dst->src[0]; + const ggml_tensor * K = dst->src[1]; + const ggml_tensor * V = dst->src[2]; + + const ggml_tensor * mask = dst->src[3]; + + ggml_tensor * KQV = dst; + + const int32_t precision = KQV->op_params[2]; + GGML_ASSERT(precision == GGML_PREC_DEFAULT); + GGML_ASSERT(Q->ne[0] == 64 || Q->ne[0] == 128 && "FlashAttention without tensor cores only supports head sizes 64 and 128."); + + if (Q->ne[1] <= 16) { + constexpr int cols_per_block = 16; + constexpr int parallel_blocks = 4; + switch (Q->ne[0]) { + case 64: + launch_fattn_tile_f32< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + case 128: + launch_fattn_tile_f32<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + default: + GGML_ASSERT(false); + break; + } + return; + } + + if (Q->ne[1] <= 32) { + constexpr int cols_per_block = 32; + constexpr int parallel_blocks = 4; + switch (Q->ne[0]) { + case 64: + launch_fattn_tile_f32< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + case 128: + launch_fattn_tile_f32<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + default: + GGML_ASSERT(false); + break; + } + return; + } + + constexpr int cols_per_block = 32; + constexpr int parallel_blocks = 1; + switch (Q->ne[0]) { + case 64: + launch_fattn_tile_f32< 64, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + case 128: + launch_fattn_tile_f32<128, cols_per_block, parallel_blocks>(Q, K, V, KQV, mask, ctx.pool(), ctx.stream()); + break; + default: + GGML_ASSERT(false); + break; + } +} diff --git a/ggml-cuda/fattn-tile-f32.cuh b/ggml-cuda/fattn-tile-f32.cuh new file mode 100644 index 000000000..b1c546c80 --- /dev/null +++ b/ggml-cuda/fattn-tile-f32.cuh @@ 
-0,0 +1,3 @@ +#include "common.cuh" + +void ggml_cuda_flash_attn_ext_tile_f32(ggml_backend_cuda_context & ctx, ggml_tensor * dst); diff --git a/ggml-cuda/fattn-vec-f16.cu b/ggml-cuda/fattn-vec-f16.cu index cbf5f7835..a18be5ddc 100644 --- a/ggml-cuda/fattn-vec-f16.cu +++ b/ggml-cuda/fattn-vec-f16.cu @@ -57,7 +57,7 @@ static __global__ void flash_attn_vec_ext_f16( // ALiBi if (max_bias > 0.0f) { - const int h = blockIdx.y; + const uint32_t h = blockIdx.y; const float base = h < n_head_log2 ? m0 : m1; const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; @@ -232,11 +232,8 @@ static __global__ void flash_attn_vec_ext_f16( dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val; } - if (parallel_blocks != 1 && tid != 0) { -#pragma unroll - for (int j = 0; j < ncols; ++j) { - dst_meta[(ic0 + j)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j], kqsum[j]); - } + if (parallel_blocks != 1 && threadIdx.x < ncols) { + dst_meta[(ic0 + threadIdx.x)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[threadIdx.x], kqsum[threadIdx.x]); } #else NO_DEVICE_CODE; diff --git a/ggml-cuda/fattn-vec-f32.cu b/ggml-cuda/fattn-vec-f32.cu index 40c336ce3..91fcdc8c3 100644 --- a/ggml-cuda/fattn-vec-f32.cu +++ b/ggml-cuda/fattn-vec-f32.cu @@ -56,7 +56,7 @@ static __global__ void flash_attn_vec_ext_f32( // ALiBi if (max_bias > 0.0f) { - const int h = blockIdx.y; + const uint32_t h = blockIdx.y; const float base = h < n_head_log2 ? m0 : m1; const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1; @@ -221,11 +221,8 @@ static __global__ void flash_attn_vec_ext_f32( dst[j_dst*D*gridDim.y + D*blockIdx.y + tid] = dst_val; } - if (parallel_blocks != 1 && tid != 0) { -#pragma unroll - for (int j = 0; j < ncols; ++j) { - dst_meta[(ic0 + j)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[j], kqsum[j]); - } + if (parallel_blocks != 1 && threadIdx.x < ncols) { + dst_meta[(ic0 + threadIdx.x)*gridDim.y*parallel_blocks + blockIdx.y*parallel_blocks + ip] = make_float2(kqmax[threadIdx.x], kqsum[threadIdx.x]); } } diff --git a/ggml-cuda/fattn.cu b/ggml-cuda/fattn.cu index 419f8e752..a1918e258 100644 --- a/ggml-cuda/fattn.cu +++ b/ggml-cuda/fattn.cu @@ -1,5 +1,7 @@ #include "common.cuh" #include "fattn-common.cuh" +#include "fattn-tile-f16.cuh" +#include "fattn-tile-f32.cuh" #include "fattn-vec-f16.cuh" #include "fattn-vec-f32.cuh" #include "fattn.cuh" @@ -88,7 +90,7 @@ static __global__ void flash_attn_ext_f16( // ALiBi if (max_bias > 0.0f) { - const int h = blockIdx.y; + const uint32_t h = blockIdx.y; const float base = h < n_head_log2 ? m0 : m1; const int exph = h < n_head_log2 ? 
h + 1 : 2*(h - n_head_log2) + 1; @@ -541,13 +543,31 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst const int32_t precision = KQV->op_params[2]; + // On AMD the tile kernels perform poorly, use the vec kernel instead: + if (cc >= CC_OFFSET_AMD) { + if (precision == GGML_PREC_DEFAULT) { + ggml_cuda_flash_attn_ext_vec_f16_no_mma(ctx, dst); + } else { + ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); + } + return; + } + if (!fast_fp16_available(cc)) { - ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); + if (Q->ne[1] <= 8) { + ggml_cuda_flash_attn_ext_vec_f32(ctx, dst); + } else { + ggml_cuda_flash_attn_ext_tile_f32(ctx, dst); + } return; } if (!fp16_mma_available(cc)) { - ggml_cuda_flash_attn_ext_vec_f16_no_mma(ctx, dst); + if (Q->ne[1] <= 8) { + ggml_cuda_flash_attn_ext_vec_f16_no_mma(ctx, dst); + } else { + ggml_cuda_flash_attn_ext_tile_f16(ctx, dst); + } return; }
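Taken together, the fattn.cu hunk above introduces a small dispatch tree over device capability and batch size: the vector kernels remain in use on AMD and for small batches (Q->ne[1] <= 8), while the new tile kernels take over for larger batches on GPUs without fast FP16 or without FP16 tensor-core (MMA) support. The sketch below restates that logic in condensed form. It is illustrative only: the `fattn_kernel` enum and the `choose_fattn_kernel()` helper are hypothetical, the boolean parameters stand in for `cc >= CC_OFFSET_AMD`, `precision == GGML_PREC_DEFAULT`, `fast_fp16_available(cc)` and `fp16_mma_available(cc)`, and `wmma_f16` denotes the pre-existing tensor-core path that lies outside this hunk.

```cpp
#include <cstdint>

// Condensed, illustrative restatement of the kernel selection added in this patch
// (not the actual llama.cpp code).
enum class fattn_kernel { vec_f32, vec_f16_no_mma, tile_f32, tile_f16, wmma_f16 };

static fattn_kernel choose_fattn_kernel(bool is_amd,        // cc >= CC_OFFSET_AMD
                                        bool prec_default,  // precision == GGML_PREC_DEFAULT
                                        bool fast_fp16,     // fast_fp16_available(cc)
                                        bool fp16_mma,      // fp16_mma_available(cc)
                                        int64_t n_q_cols) { // Q->ne[1]
    if (is_amd) {
        // The tile kernels perform poorly on AMD, so a vector kernel is always used.
        return prec_default ? fattn_kernel::vec_f16_no_mma : fattn_kernel::vec_f32;
    }
    if (!fast_fp16) {
        // No fast FP16 arithmetic: stay in FP32; the tile kernel only pays off for larger batches.
        return n_q_cols <= 8 ? fattn_kernel::vec_f32 : fattn_kernel::tile_f32;
    }
    if (!fp16_mma) {
        // FP16 available but no tensor cores: large batches go to the new FP16 tile kernel.
        return n_q_cols <= 8 ? fattn_kernel::vec_f16_no_mma : fattn_kernel::tile_f16;
    }
    // Tensor cores available: the existing WMMA-based kernels (outside this hunk) are used.
    return fattn_kernel::wmma_f16;
}
```

The `<= 8` cutoff keeps the vector-kernel fast path for token generation and other small batches, which are not the target of the new tile kernels; as the patch title says, the tile kernels are aimed at large-batch FlashAttention on GPUs without usable tensor cores.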