Merge branch 'master' into HEAD
Commit 2f7f634143

23 changed files with 1347 additions and 378 deletions

.github/workflows/build.yml (vendored): 6 changes
@@ -10,10 +10,10 @@ on:
   push:
     branches:
       - master
-    paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu']
+    paths: ['.github/workflows/**', '**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m']
   pull_request:
     types: [opened, synchronize, reopened]
-    paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu']
+    paths: ['**/CMakeLists.txt', '**/Makefile', '**/*.h', '**/*.hpp', '**/*.c', '**/*.cpp', '**/*.cu', '**/*.swift', '**/*.m']
 
 env:
   BRANCH_NAME: ${{ github.head_ref || github.ref_name }}

@@ -258,7 +258,7 @@ jobs:
 
     strategy:
       matrix:
-        destination: ['platform=macOS,name=Any Mac', 'platform=iOS,name=Any iOS Device', 'platform=tvOS,name=Any tvOS Device']
+        destination: ['generic/platform=macOS', 'generic/platform=iOS', 'generic/platform=tvOS']
 
     steps:
       - name: Clone
.github/workflows/gguf-publish.yml (vendored): 3 changes

@@ -36,8 +36,9 @@ jobs:
         poetry install
 
     - name: Build package
-      run: poetry build
+      run: cd gguf-py && poetry build
    - name: Publish package
      uses: pypa/gh-action-pypi-publish@release/v1
      with:
        password: ${{ secrets.PYPI_API_TOKEN }}
+       packages-dir: gguf-py/dist
.gitignore (vendored): 1 change

@@ -10,6 +10,7 @@
 *.gcno
 *.gcda
 *.dot
+*.metallib
 .DS_Store
 .build/
 .cache/
@@ -10,15 +10,18 @@ let platforms: [SupportedPlatform]? = [
     .tvOS(.v14)
 ]
 let exclude: [String] = []
-let additionalSources: [String] = ["ggml-metal.m", "ggml-metal.metal"]
+let resources: [Resource] = [
+    .process("ggml-metal.metal")
+]
+let additionalSources: [String] = ["ggml-metal.m"]
 let additionalSettings: [CSetting] = [
     .unsafeFlags(["-fno-objc-arc"]),
-    .define("GGML_SWIFT"),
     .define("GGML_USE_METAL")
 ]
 #else
 let platforms: [SupportedPlatform]? = nil
 let exclude: [String] = ["ggml-metal.metal"]
+let resources: [Resource] = []
 let additionalSources: [String] = []
 let additionalSettings: [CSetting] = []
 #endif

@@ -40,6 +43,7 @@ let package = Package(
             "ggml-alloc.c",
             "k_quants.c",
         ] + additionalSources,
+        resources: resources,
         publicHeadersPath: "spm-headers",
         cSettings: [
             .unsafeFlags(["-Wno-shorten-64-to-32"]),
README.md: 27 changes

@@ -95,6 +95,7 @@ as the main playground for developing new features for the [ggml](https://github
 - [X] [Aquila-7B](https://huggingface.co/BAAI/Aquila-7B) / [AquilaChat-7B](https://huggingface.co/BAAI/AquilaChat-7B)
 - [X] [Starcoder models](https://github.com/ggerganov/llama.cpp/pull/3187)
 - [X] [Mistral AI v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1)
+- [X] [Refact](https://huggingface.co/smallcloudai/Refact-1_6B-fim)
 
 **Bindings:**
 

@@ -377,7 +378,7 @@ Building the program with BLAS support may lead to some performance improvements
 
 - #### cuBLAS
 
-  This provides BLAS acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads).
+  This provides BLAS acceleration using the CUDA cores of your Nvidia GPU. Make sure to have the CUDA toolkit installed. You can download it from your Linux distro's package manager (e.g. `apt install nvidia-cuda-toolkit`) or from here: [CUDA Toolkit](https://developer.nvidia.com/cuda-downloads).
   - Using `make`:
     ```bash
     make LLAMA_CUBLAS=1

@@ -613,6 +614,18 @@ For more information, see [https://huggingface.co/docs/transformers/perplexity](
 The perplexity measurements in table above are done against the `wikitext2` test dataset (https://paperswithcode.com/dataset/wikitext-2), with context length of 512.
 The time per token is measured on a MacBook M1 Pro 32GB RAM using 4 and 8 threads.
 
+#### How to run
+
+1. Download/extract: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
+2. Run `./perplexity -m models/7B/ggml-model-q4_0.gguf -f wiki.test.raw`
+3. Output:
+```
+perplexity : calculating perplexity over 655 chunks
+24.43 seconds per pass - ETA 4.45 hours
+[1]4.5970,[2]5.1807,[3]6.0382,...
+```
+And after 4.45 hours, you will have the final perplexity.
+
 ### Interactive mode
 
 If you want a more ChatGPT-like experience, you can run in interactive mode by passing `-i` as a parameter.

@@ -775,18 +788,6 @@ If your issue is with model generation quality, then please at least scan the fo
 - [Aligning language models to follow instructions](https://openai.com/research/instruction-following)
 - [Training language models to follow instructions with human feedback](https://arxiv.org/abs/2203.02155)
 
-#### How to run
-
-1. Download/extract: https://s3.amazonaws.com/research.metamind.io/wikitext/wikitext-2-raw-v1.zip?ref=salesforce-research
-2. Run `./perplexity -m models/7B/ggml-model-q4_0.gguf -f wiki.test.raw`
-3. Output:
-```
-perplexity : calculating perplexity over 655 chunks
-24.43 seconds per pass - ETA 4.45 hours
-[1]4.5970,[2]5.1807,[3]6.0382,...
-```
-And after 4.45 hours, you will have the final perplexity.
-
 ### Android
 
 #### Building the Project using Android NDK
@@ -111,12 +111,14 @@ pub fn build(b: *std.build.Builder) !void {
     const common = make.obj("common", "common/common.cpp");
     const console = make.obj("common", "common/console.cpp");
     const grammar_parser = make.obj("grammar-parser", "common/grammar-parser.cpp");
+    const train = make.obj("train", "common/train.cpp");
 
     _ = make.exe("main", "examples/main/main.cpp", &.{ ggml, ggml_alloc, llama, common, console, grammar_parser });
     _ = make.exe("quantize", "examples/quantize/quantize.cpp", &.{ ggml, ggml_alloc, llama, common });
     _ = make.exe("perplexity", "examples/perplexity/perplexity.cpp", &.{ ggml, ggml_alloc, llama, common });
     _ = make.exe("embedding", "examples/embedding/embedding.cpp", &.{ ggml, ggml_alloc, llama, common });
-    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, llama, common });
+    _ = make.exe("finetune", "examples/finetune/finetune.cpp", &.{ ggml, ggml_alloc, llama, common, train });
+    _ = make.exe("train-text-from-scratch", "examples/train-text-from-scratch/train-text-from-scratch.cpp", &.{ ggml, ggml_alloc, llama, common, train });
 
     const server = make.exe("server", "examples/server/server.cpp", &.{ ggml, ggml_alloc, llama, common, grammar_parser });
     if (server.target.isWindows()) {
@@ -167,8 +167,10 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             invalid_param = true;
             break;
         }
+        // store the external file name in params
+        params.prompt_file = argv[i];
         std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.prompt));
-        if (params.prompt.back() == '\n') {
+        if (!params.prompt.empty() && params.prompt.back() == '\n') {
             params.prompt.pop_back();
         }
     } else if (arg == "-n" || arg == "--n-predict") {

@@ -293,7 +295,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             break;
         }
         std::copy(std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>(), back_inserter(params.cfg_negative_prompt));
-        if (params.cfg_negative_prompt.back() == '\n') {
+        if (!params.cfg_negative_prompt.empty() && params.cfg_negative_prompt.back() == '\n') {
            params.cfg_negative_prompt.pop_back();
        }
    } else if (arg == "--cfg-scale") {

@@ -1020,10 +1022,11 @@ llama_token llama_sample_token(
             id = llama_sample_token_mirostat_v2(ctx, &cur_p, mirostat_tau, mirostat_eta, &mirostat_mu);
         } else {
             // Temperature sampling
-            llama_sample_top_k      (ctx, &cur_p, top_k, 1);
-            llama_sample_tail_free  (ctx, &cur_p, tfs_z, 1);
-            llama_sample_typical    (ctx, &cur_p, typical_p, 1);
-            llama_sample_top_p      (ctx, &cur_p, top_p, 1);
+            size_t min_keep = std::max(1, params.n_probs);
+            llama_sample_top_k      (ctx, &cur_p, top_k, min_keep);
+            llama_sample_tail_free  (ctx, &cur_p, tfs_z, min_keep);
+            llama_sample_typical    (ctx, &cur_p, typical_p, min_keep);
+            llama_sample_top_p      (ctx, &cur_p, top_p, min_keep);
             llama_sample_temp(ctx, &cur_p, temp);
 
             {
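The `min_keep` change above matters when a caller asks for the top `n_probs` probabilities of each sampled token: if the truncation samplers (top-k, tail-free, typical, top-p) are allowed to shrink the candidate pool to a single entry, there is nothing left to report. The following self-contained C++ sketch uses toy data and a hypothetical `top_k_truncate` helper, not the llama.cpp API, to illustrate the idea behind `min_keep = std::max(1, params.n_probs)`.

```cpp
#include <algorithm>
#include <cstdio>
#include <vector>

// Toy candidate pool: (token id, probability), assumed already sorted by probability.
struct candidate { int tok; float p; };

// Truncate to the k most probable candidates, but never below min_keep,
// mirroring the min_keep argument passed to the truncation samplers above.
static void top_k_truncate(std::vector<candidate> & cands, int k, size_t min_keep) {
    const size_t keep = std::max<size_t>(std::max<size_t>(k, 1), min_keep);
    if (cands.size() > keep) {
        cands.resize(keep);
    }
}

int main() {
    std::vector<candidate> cands = {
        {7, 0.40f}, {2, 0.25f}, {9, 0.15f}, {4, 0.12f}, {1, 0.08f},
    };

    const int top_k   = 1;                          // very aggressive truncation
    const int n_probs = 3;                          // caller wants 3 probabilities back
    const size_t min_keep = std::max(1, n_probs);

    top_k_truncate(cands, top_k, min_keep);         // keeps 3 candidates, not 1

    for (const auto & c : cands) {
        std::printf("token %d  p = %.2f\n", c.tok, c.p);
    }
    return 0;
}
```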
@@ -79,6 +79,7 @@ struct gpt_params {
     std::string model_draft       = "";        // draft model for speculative decoding
     std::string model_alias       = "unknown"; // model alias
     std::string prompt            = "";
+    std::string prompt_file       = "";        // store the external prompt file name
     std::string path_prompt_cache = "";        // path to file for saving/loading prompt eval state
     std::string input_prefix      = "";        // string to prefix user inputs with
     std::string input_suffix      = "";        // string to suffix user inputs with
convert-persimmon-to-gguf.py (new file): 130 changes

@@ -0,0 +1,130 @@
+import torch
+import os
+from pprint import pprint
+import sys
+import argparse
+from pathlib import Path
+from sentencepiece import SentencePieceProcessor
+if 'NO_LOCAL_GGUF' not in os.environ:
+    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
+import gguf
+
+def _flatten_dict(dct, tensors, prefix=None):
+    assert isinstance(dct, dict)
+    for key in dct.keys():
+        new_prefix = prefix + '.' + key if prefix is not None else key
+        if isinstance(dct[key], torch.Tensor):
+            tensors[new_prefix] = dct[key]
+        elif isinstance(dct[key], dict):
+            _flatten_dict(dct[key], tensors, new_prefix)
+        else:
+            raise ValueError(type(dct[key]))
+    return None
+
+def _get_sentencepiece_tokenizer_info(dir_model: Path):
+    tokenizer_path = dir_model / 'adept_vocab.model'
+    print('gguf: getting sentencepiece tokenizer from', tokenizer_path)
+    tokenizer = SentencePieceProcessor(str(tokenizer_path))
+    print('gguf: adding tokens')
+    tokens: list[bytes] = []
+    scores: list[float] = []
+    toktypes: list[int] = []
+
+    for i in range(tokenizer.vocab_size()):
+        text: bytes
+        score: float
+
+        piece = tokenizer.id_to_piece(i)
+        text = piece.encode("utf-8")
+        score = tokenizer.get_score(i)
+
+        toktype = 1
+        if tokenizer.is_unknown(i):
+            toktype = 2
+        if tokenizer.is_control(i):
+            toktype = 3
+        if tokenizer.is_unused(i):
+            toktype = 5
+        if tokenizer.is_byte(i):
+            toktype = 6
+
+        tokens.append(text)
+        scores.append(score)
+        toktypes.append(toktype)
+    pass
+    return tokens, scores, toktypes
+
+def main():
+    parser = argparse.ArgumentParser(description="Convert a Persimmon model from Adept (e.g. Persimmon 8b chat) to a GGML compatible file")
+    parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
+    parser.add_argument("--ckpt-path", type=Path, help="path to persimmon checkpoint .pt file")
+    parser.add_argument("--model-dir", type=Path, help="directory containing model e.g. 8b_chat_model_release")
+    parser.add_argument("--adept-inference-dir", type=str, help="path to adept-inference code directory")
+    args = parser.parse_args()
+    sys.path.append(str(args.adept_inference_dir))
+    persimmon_model = torch.load(args.ckpt_path)
+    hparams = persimmon_model['args']
+    pprint(hparams)
+    tensors = {}
+    _flatten_dict(persimmon_model['model'], tensors, None)
+
+    arch = gguf.MODEL_ARCH.PERSIMMON
+    gguf_writer = gguf.GGUFWriter(args.outfile, gguf.MODEL_ARCH_NAMES[arch])
+
+    block_count = hparams.num_layers
+    head_count = hparams.num_attention_heads
+    head_count_kv = head_count
+    ctx_length = hparams.seq_length
+    hidden_size = hparams.hidden_size
+
+    gguf_writer.add_name('persimmon-8b-chat')
+    gguf_writer.add_context_length(ctx_length)
+    gguf_writer.add_embedding_length(hidden_size)
+    gguf_writer.add_block_count(block_count)
+    gguf_writer.add_feed_forward_length(hparams.ffn_hidden_size)
+    gguf_writer.add_rope_dimension_count(hidden_size // head_count)
+    gguf_writer.add_head_count(head_count)
+    gguf_writer.add_head_count_kv(head_count_kv)
+    gguf_writer.add_rope_freq_base(hparams.rotary_emb_base)
+    gguf_writer.add_layer_norm_eps(hparams.layernorm_epsilon)
+
+    tokens, scores, toktypes = _get_sentencepiece_tokenizer_info(args.model_dir)
+    gguf_writer.add_tokenizer_model('llama')
+    gguf_writer.add_token_list(tokens)
+    gguf_writer.add_token_scores(scores)
+    gguf_writer.add_token_types(toktypes)
+    gguf_writer.add_bos_token_id(71013)
+    gguf_writer.add_eos_token_id(71013)
+
+    tensor_map = gguf.get_tensor_name_map(arch, block_count)
+    print(tensor_map)
+    for name in tensors.keys():
+        data = tensors[name]
+        if name.endswith(".self_attention.rotary_emb.inv_freq"):
+            continue
+        old_dtype = data.dtype
+        # TODO: FP16 conversion produces garbage outputs. (Q8_0 does not, so..?)
+        data = data.to(torch.float32).squeeze().numpy()
+        new_name = tensor_map.get_name(name, try_suffixes = (".weight", ".bias"))
+        if new_name is None:
+            print("Can not map tensor '" + name + "'")
+            sys.exit()
+        n_dims = len(data.shape)
+        print(new_name + ", n_dims = " + str(n_dims) + ", " + str(old_dtype) + " --> " + str(data.dtype))
+        gguf_writer.add_tensor(new_name, data)
+    print("gguf: write header")
+    gguf_writer.write_header_to_file()
+    print("gguf: write metadata")
+    gguf_writer.write_kv_data_to_file()
+    print("gguf: write tensors")
+    gguf_writer.write_tensors_to_file()
+
+    gguf_writer.close()
+
+    print(f"gguf: model successfully exported to '{args.outfile}'")
+    print("")
+
+
+if __name__ == '__main__':
+    main()
@@ -2,7 +2,7 @@
 
 This is pretty much just a straight port of aigoopy/llm-jeopardy/ with an added graph viewer.
 
-The jeopardy test can be used to compare the fact knowledge of different models and compare them to eachother. This is in contrast to some other tests, which test logical deduction, creativity, writing skills, etc.
+The jeopardy test can be used to compare the fact knowledge of different models and compare them to each other. This is in contrast to some other tests, which test logical deduction, creativity, writing skills, etc.
 
 
 Step 1: Open jeopardy.sh and modify the following:
@@ -10,6 +10,7 @@
 #include <cstdio>
 #include <string>
 #include <vector>
+#include <ctime>
 
 // trim whitespace from the beginning and end of a string
 static std::string trim(const std::string & str) {

@@ -70,6 +71,26 @@ struct client {
     std::vector<llama_token> tokens_prev;
 };
 
+static void print_date_time() {
+    std::time_t current_time = std::time(nullptr);
+    std::tm* local_time = std::localtime(&current_time);
+    char buffer[80];
+    strftime(buffer, sizeof(buffer), "%Y-%m-%d %H:%M:%S", local_time);
+
+    printf("\n\033[35mrun parameters as at %s\033[0m\n", buffer);
+}
+
+// Define a split string function to ...
+static std::vector<std::string> split_string(const std::string& input, char delimiter) {
+    std::vector<std::string> tokens;
+    std::istringstream stream(input);
+    std::string token;
+    while (std::getline(stream, token, delimiter)) {
+        tokens.push_back(token);
+    }
+    return tokens;
+}
+
 int main(int argc, char ** argv) {
     srand(1234);

@@ -104,6 +125,23 @@ int main(int argc, char ** argv) {
     params.logits_all = true;
     std::tie(model, ctx) = llama_init_from_gpt_params(params);
 
+    // load the prompts from an external file if there are any
+    if (params.prompt.empty()) {
+        printf("\n\033[32mNo new questions so proceed with build-in defaults.\033[0m\n");
+    } else {
+        // Output each line of the input params.prompts vector and copy to k_prompts
+        int index = 0;
+        printf("\n\033[32mNow printing the external prompt file %s\033[0m\n\n", params.prompt_file.c_str());
+
+        std::vector<std::string> prompts = split_string(params.prompt, '\n');
+        for (const auto& prompt : prompts) {
+            k_prompts.resize(index + 1);
+            k_prompts[index] = prompt;
+            index++;
+            printf("%3d prompt: %s\n", index, prompt.c_str());
+        }
+    }
+
     fprintf(stderr, "\n\n");
     fflush(stderr);

@@ -233,7 +271,7 @@ int main(int argc, char ** argv) {
         client.n_decoded = 0;
         client.i_batch = batch.n_tokens - 1;
 
-        LOG_TEE("\033[1mClient %3d, seq %4d, started decoding ...\033[0m\n", client.id, client.seq_id);
+        LOG_TEE("\033[31mClient %3d, seq %4d, started decoding ...\033[0m\n", client.id, client.seq_id);
 
         g_seq_id += 1;
 

@@ -336,8 +374,8 @@
 
         const auto t_main_end = ggml_time_us();
 
-        LOG_TEE("\033[1mClient %3d, seq %4d, prompt %4d t, response %4d t, time %5.2f s, speed %5.2f t/s, cache miss %d \033[0m \n\nInput: %s\nResponse: %s\n\n",
-                client.id, client.seq_id, client.n_prompt, client.n_decoded,
+        LOG_TEE("\033[31mClient %3d, seq %3d/%3d, prompt %4d t, response %4d t, time %5.2f s, speed %5.2f t/s, cache miss %d \033[0m \nInput: %s\n\033[35mResponse: %s\033[0m\n\n",
+                client.id, client.seq_id, n_seq, client.n_prompt, client.n_decoded,
                 (t_main_end - client.t_start_prompt) / 1e6,
                 (double) (client.n_prompt + client.n_decoded) / (t_main_end - client.t_start_prompt) * 1e6,
                 n_cache_miss,

@@ -357,13 +395,21 @@
 
     const auto t_main_end = ggml_time_us();
 
-    LOG_TEE("\n\n");
+    print_date_time();
+
+    LOG_TEE("\n%s: n_parallel = %d, n_sequences = %d, cont_batching = %d, system tokens = %d\n", __func__, n_clients, n_seq, cont_batching, n_tokens_system);
+    if (params.prompt_file.empty()) {
+        params.prompt_file = "used built-in defaults";
+    }
+    LOG_TEE("External prompt file: \033[32m%s\033[0m\n", params.prompt_file.c_str());
+    LOG_TEE("Model and path used: \033[32m%s\033[0m\n\n", params.model.c_str());
 
     LOG_TEE("Total prompt tokens: %6d, speed: %5.2f t/s\n", n_total_prompt, (double) (n_total_prompt ) / (t_main_end - t_main_start) * 1e6);
     LOG_TEE("Total gen tokens: %6d, speed: %5.2f t/s\n", n_total_gen, (double) (n_total_gen ) / (t_main_end - t_main_start) * 1e6);
     LOG_TEE("Total speed (AVG): %6s speed: %5.2f t/s\n", "", (double) (n_total_prompt + n_total_gen) / (t_main_end - t_main_start) * 1e6);
     LOG_TEE("Cache misses: %6d\n", n_cache_miss);
 
-    LOG_TEE("\n\n");
+    LOG_TEE("\n");
 
     llama_print_timings(ctx);
 
@@ -114,9 +114,9 @@ node index.js
 
     `top_k`: Limit the next token selection to the K most probable tokens (default: 40).
 
-    `top_p`: Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P (default: 0.9).
+    `top_p`: Limit the next token selection to a subset of tokens with a cumulative probability above a threshold P (default: 0.95).
 
-    `n_predict`: Set the number of tokens to predict when generating text. **Note:** May exceed the set limit slightly if the last token is a partial multibyte character. When 0, no tokens will be generated but the prompt is evaluated into the cache. (default: 128, -1 = infinity).
+    `n_predict`: Set the number of tokens to predict when generating text. **Note:** May exceed the set limit slightly if the last token is a partial multibyte character. When 0, no tokens will be generated but the prompt is evaluated into the cache. (default: -1, -1 = infinity).
 
     `n_keep`: Specify the number of tokens from the initial prompt to retain when the model resets its internal context.
     By default, this value is set to 0 (meaning no tokens are kept). Use `-1` to retain all tokens from the initial prompt.

@@ -156,6 +156,8 @@ node index.js
 
     `logit_bias`: Modify the likelihood of a token appearing in the generated text completion. For example, use `"logit_bias": [[15043,1.0]]` to increase the likelihood of the token 'Hello', or `"logit_bias": [[15043,-1.0]]` to decrease its likelihood. Setting the value to false, `"logit_bias": [[15043,false]]` ensures that the token `Hello` is never produced (default: []).
 
+    `n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token (default: 0)
+
 - **POST** `/tokenize`: Tokenize a given text.
 
     *Options:*
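Taken together, these options are simply fields of the JSON body sent to the server's completion endpoint. A minimal C++ sketch that assembles such a body is shown below; it is illustrative only, the example values are arbitrary, and the `prompt` field is an assumption drawn from the rest of the server README rather than from this hunk.

```cpp
#include <cstdio>

// Illustrative only: build a completion request body using the options
// documented above (top_k, top_p, n_predict, n_keep, logit_bias, n_probs).
int main() {
    const char * body = R"({
        "prompt":     "Building a website can be done in 10 simple steps:",
        "top_k":      40,
        "top_p":      0.95,
        "n_predict":  128,
        "n_keep":     -1,
        "logit_bias": [[15043, -1.0]],
        "n_probs":    5
    })";
    std::printf("%s\n", body);
    return 0;
}
```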
@@ -534,99 +534,21 @@ struct llama_server_context
         return result;
     }
 
+    {
     // out of user input, sample next token
-    const float temp = params.temp;
-    const int32_t top_k = params.top_k <= 0 ? llama_n_vocab(model) : params.top_k;
-    const float top_p = params.top_p;
-    const float tfs_z = params.tfs_z;
-    const float typical_p = params.typical_p;
-    const int32_t repeat_last_n = params.repeat_last_n < 0 ? n_ctx : params.repeat_last_n;
-    const float repeat_penalty = params.repeat_penalty;
-    const float alpha_presence = params.presence_penalty;
-    const float alpha_frequency = params.frequency_penalty;
-    const int mirostat = params.mirostat;
-    const float mirostat_tau = params.mirostat_tau;
-    const float mirostat_eta = params.mirostat_eta;
-    const bool penalize_nl = params.penalize_nl;
-    const int32_t n_probs = params.n_probs;
-
-    {
-        auto *logits = llama_get_logits(ctx);
-        auto n_vocab = llama_n_vocab(model);
-
-        // Apply params.logit_bias map
-        for (const auto &it : params.logit_bias)
-        {
-            logits[it.first] += it.second;
-        }
-
         std::vector<llama_token_data> candidates;
-        candidates.reserve(n_vocab);
-        for (llama_token token_id = 0; token_id < n_vocab; token_id++)
-        {
-            candidates.emplace_back(llama_token_data{token_id, logits[token_id], 0.0f});
-        }
+        candidates.reserve(llama_n_vocab(model));
 
-        llama_token_data_array candidates_p = {candidates.data(), candidates.size(), false};
+        result.tok = llama_sample_token(ctx, NULL, grammar, params, last_n_tokens, candidates);
 
-        // Apply penalties
-        float nl_logit = logits[llama_token_nl(ctx)];
-        auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), repeat_last_n), n_ctx);
-        llama_sample_repetition_penalty(ctx, &candidates_p,
-            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
-            last_n_repeat, repeat_penalty);
-        llama_sample_frequency_and_presence_penalties(ctx, &candidates_p,
-            last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
-            last_n_repeat, alpha_frequency, alpha_presence);
-        if (!penalize_nl)
-        {
-            logits[llama_token_nl(ctx)] = nl_logit;
-        }
+        llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
 
-        if (grammar != nullptr) {
-            llama_sample_grammar(ctx, &candidates_p, grammar);
-        }
-
-        if (temp <= 0)
-        {
-            // Greedy sampling
-            result.tok = llama_sample_token_greedy(ctx, &candidates_p);
-            if (n_probs > 0)
-            {
+        const int32_t n_probs = params.n_probs;
+        if (params.temp <= 0 && n_probs > 0)
         {
+            // For llama_sample_token_greedy we need to sort candidates
             llama_sample_softmax(ctx, &candidates_p);
         }
-        }
-        else
-        {
-            if (mirostat == 1)
-            {
-                static float mirostat_mu = 2.0f * mirostat_tau;
-                const int mirostat_m = 100;
-                llama_sample_temp(ctx, &candidates_p, temp);
-                result.tok = llama_sample_token_mirostat(ctx, &candidates_p, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
-            }
-            else if (mirostat == 2)
-            {
-                static float mirostat_mu = 2.0f * mirostat_tau;
-                llama_sample_temp(ctx, &candidates_p, temp);
-                result.tok = llama_sample_token_mirostat_v2(ctx, &candidates_p, mirostat_tau, mirostat_eta, &mirostat_mu);
-            }
-            else
-            {
-                // Temperature sampling
-                size_t min_keep = std::max(1, n_probs);
-                llama_sample_top_k(ctx, &candidates_p, top_k, min_keep);
-                llama_sample_tail_free(ctx, &candidates_p, tfs_z, min_keep);
-                llama_sample_typical(ctx, &candidates_p, typical_p, min_keep);
-                llama_sample_top_p(ctx, &candidates_p, top_p, min_keep);
-                llama_sample_temp(ctx, &candidates_p, temp);
-                result.tok = llama_sample_token(ctx, &candidates_p);
-            }
-        }
-
-        if (grammar != nullptr) {
-            llama_grammar_accept_token(ctx, grammar, result.tok);
-        }
-
         for (size_t i = 0; i < std::min(candidates_p.size, (size_t)n_probs); ++i)
         {
ggml-metal.m: 269 changes

@@ -81,18 +81,18 @@ struct ggml_metal_context {
     GGML_METAL_DECL_KERNEL(get_rows_q6_K);
     GGML_METAL_DECL_KERNEL(rms_norm);
     GGML_METAL_DECL_KERNEL(norm);
-    GGML_METAL_DECL_KERNEL(mul_mat_f32_f32);
-    GGML_METAL_DECL_KERNEL(mul_mat_f16_f32);
-    GGML_METAL_DECL_KERNEL(mul_mat_f16_f32_1row);
-    GGML_METAL_DECL_KERNEL(mul_mat_f16_f32_l4);
-    GGML_METAL_DECL_KERNEL(mul_mat_q4_0_f32);
-    GGML_METAL_DECL_KERNEL(mul_mat_q4_1_f32);
-    GGML_METAL_DECL_KERNEL(mul_mat_q8_0_f32);
-    GGML_METAL_DECL_KERNEL(mul_mat_q2_K_f32);
-    GGML_METAL_DECL_KERNEL(mul_mat_q3_K_f32);
-    GGML_METAL_DECL_KERNEL(mul_mat_q4_K_f32);
-    GGML_METAL_DECL_KERNEL(mul_mat_q5_K_f32);
-    GGML_METAL_DECL_KERNEL(mul_mat_q6_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_f32_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_f16_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_1row);
+    GGML_METAL_DECL_KERNEL(mul_mv_f16_f32_l4);
+    GGML_METAL_DECL_KERNEL(mul_mv_q4_0_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_q4_1_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_q8_0_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_q2_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_q3_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_q4_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_q5_K_f32);
+    GGML_METAL_DECL_KERNEL(mul_mv_q6_K_f32);
     GGML_METAL_DECL_KERNEL(mul_mm_f32_f32);
     GGML_METAL_DECL_KERNEL(mul_mm_f16_f32);
     GGML_METAL_DECL_KERNEL(mul_mm_q4_0_f32);
@@ -109,6 +109,8 @@ struct ggml_metal_context {
     GGML_METAL_DECL_KERNEL(cpy_f32_f16);
     GGML_METAL_DECL_KERNEL(cpy_f32_f32);
     GGML_METAL_DECL_KERNEL(cpy_f16_f16);
+    GGML_METAL_DECL_KERNEL(concat);
+    GGML_METAL_DECL_KERNEL(sqr);
 
 #undef GGML_METAL_DECL_KERNEL
 };
@@ -183,56 +185,44 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
 
     ctx->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);
 
-#ifdef GGML_SWIFT
-    // load the default.metallib file
+    // load library
     {
+        NSBundle * bundle = nil;
+#ifdef SWIFT_PACKAGE
+        bundle = SWIFTPM_MODULE_BUNDLE;
+#else
+        bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
+#endif
         NSError * error = nil;
+        NSString * libPath = [bundle pathForResource:@"default" ofType:@"metallib"];
-        NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
-        NSString * llamaBundlePath = [bundle pathForResource:@"llama_llama" ofType:@"bundle"];
-        NSBundle * llamaBundle = [NSBundle bundleWithPath:llamaBundlePath];
-        NSString * libPath = [llamaBundle pathForResource:@"default" ofType:@"metallib"];
+        if (libPath != nil) {
             NSURL * libURL = [NSURL fileURLWithPath:libPath];
+            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [libPath UTF8String]);
-        // Load the metallib file into a Metal library
             ctx->library = [ctx->device newLibraryWithURL:libURL error:&error];
+        } else {
+            GGML_METAL_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__);
 
-        if (error) {
+            NSString * sourcePath = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
-            GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+            GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [sourcePath UTF8String]);
-            return NULL;
+            NSString * src = [NSString stringWithContentsOfFile:sourcePath encoding:NSUTF8StringEncoding error:&error];
-        }
-    }
-#else
-    UNUSED(msl_library_source);
-
-    // read the source from "ggml-metal.metal" into a string and use newLibraryWithSource
-    {
-        NSError * error = nil;
-
-        //NSString * path = [[NSBundle mainBundle] pathForResource:@"../../examples/metal/metal" ofType:@"metal"];
-        NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
-        NSString * path = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
-        GGML_METAL_LOG_INFO("%s: loading '%s'\n", __func__, [path UTF8String]);
-
-        NSString * src = [NSString stringWithContentsOfFile:path encoding:NSUTF8StringEncoding error:&error];
         if (error) {
             GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
             return NULL;
         }
 
+        MTLCompileOptions* options = nil;
 #ifdef GGML_QKK_64
-        MTLCompileOptions* options = [MTLCompileOptions new];
+        options = [MTLCompileOptions new];
         options.preprocessorMacros = @{ @"QK_K" : @(64) };
-        ctx->library = [ctx->device newLibraryWithSource:src options:options error:&error];
-#else
-        ctx->library = [ctx->device newLibraryWithSource:src options:nil error:&error];
 #endif
+        ctx->library = [ctx->device newLibraryWithSource:src options:options error:&error];
+        }
 
         if (error) {
             GGML_METAL_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
             return NULL;
         }
     }
-#endif
 
     // load kernels
     {
@@ -272,18 +262,19 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
         GGML_METAL_ADD_KERNEL(get_rows_q6_K);
         GGML_METAL_ADD_KERNEL(rms_norm);
         GGML_METAL_ADD_KERNEL(norm);
-        GGML_METAL_ADD_KERNEL(mul_mat_f32_f32);
-        GGML_METAL_ADD_KERNEL(mul_mat_f16_f32);
-        GGML_METAL_ADD_KERNEL(mul_mat_f16_f32_1row);
-        GGML_METAL_ADD_KERNEL(mul_mat_f16_f32_l4);
-        GGML_METAL_ADD_KERNEL(mul_mat_q4_0_f32);
-        GGML_METAL_ADD_KERNEL(mul_mat_q4_1_f32);
-        GGML_METAL_ADD_KERNEL(mul_mat_q8_0_f32);
-        GGML_METAL_ADD_KERNEL(mul_mat_q2_K_f32);
-        GGML_METAL_ADD_KERNEL(mul_mat_q3_K_f32);
-        GGML_METAL_ADD_KERNEL(mul_mat_q4_K_f32);
-        GGML_METAL_ADD_KERNEL(mul_mat_q5_K_f32);
-        GGML_METAL_ADD_KERNEL(mul_mat_q6_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_f32_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_f16_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_1row);
+        GGML_METAL_ADD_KERNEL(mul_mv_f16_f32_l4);
+        GGML_METAL_ADD_KERNEL(mul_mv_q4_0_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_q4_1_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_q8_0_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_q2_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_q3_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_q4_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_q5_K_f32);
+        GGML_METAL_ADD_KERNEL(mul_mv_q6_K_f32);
+        if ([ctx->device supportsFamily:MTLGPUFamilyApple7]) {
         GGML_METAL_ADD_KERNEL(mul_mm_f32_f32);
         GGML_METAL_ADD_KERNEL(mul_mm_f16_f32);
         GGML_METAL_ADD_KERNEL(mul_mm_q4_0_f32);
@@ -294,18 +285,34 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
         GGML_METAL_ADD_KERNEL(mul_mm_q4_K_f32);
         GGML_METAL_ADD_KERNEL(mul_mm_q5_K_f32);
         GGML_METAL_ADD_KERNEL(mul_mm_q6_K_f32);
+        }
         GGML_METAL_ADD_KERNEL(rope_f32);
         GGML_METAL_ADD_KERNEL(rope_f16);
         GGML_METAL_ADD_KERNEL(alibi_f32);
         GGML_METAL_ADD_KERNEL(cpy_f32_f16);
         GGML_METAL_ADD_KERNEL(cpy_f32_f32);
         GGML_METAL_ADD_KERNEL(cpy_f16_f16);
+        GGML_METAL_ADD_KERNEL(concat);
+        GGML_METAL_ADD_KERNEL(sqr);
 
 #undef GGML_METAL_ADD_KERNEL
     }
 
-    GGML_METAL_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
 #if TARGET_OS_OSX
+    // print MTL GPU family:
+    GGML_METAL_LOG_INFO("%s: GPU name: %s\n", __func__, [[ctx->device name] UTF8String]);
+
+    // determine max supported GPU family
+    // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
+    // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+    for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) {
+        if ([ctx->device supportsFamily:i]) {
+            GGML_METAL_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - MTLGPUFamilyApple1 + 1, i);
+            break;
+        }
+    }
+
+    GGML_METAL_LOG_INFO("%s: hasUnifiedMemory = %s\n", __func__, ctx->device.hasUnifiedMemory ? "true" : "false");
     GGML_METAL_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, ctx->device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
     if (ctx->device.maxTransferRate != 0) {
         GGML_METAL_LOG_INFO("%s: maxTransferRate = %8.2f MB/s\n", __func__, ctx->device.maxTransferRate / 1024.0 / 1024.0);
@@ -347,18 +354,19 @@ void ggml_metal_free(struct ggml_metal_context * ctx) {
     GGML_METAL_DEL_KERNEL(get_rows_q6_K);
     GGML_METAL_DEL_KERNEL(rms_norm);
     GGML_METAL_DEL_KERNEL(norm);
-    GGML_METAL_DEL_KERNEL(mul_mat_f32_f32);
-    GGML_METAL_DEL_KERNEL(mul_mat_f16_f32);
-    GGML_METAL_DEL_KERNEL(mul_mat_f16_f32_1row);
-    GGML_METAL_DEL_KERNEL(mul_mat_f16_f32_l4);
-    GGML_METAL_DEL_KERNEL(mul_mat_q4_0_f32);
-    GGML_METAL_DEL_KERNEL(mul_mat_q4_1_f32);
-    GGML_METAL_DEL_KERNEL(mul_mat_q8_0_f32);
-    GGML_METAL_DEL_KERNEL(mul_mat_q2_K_f32);
-    GGML_METAL_DEL_KERNEL(mul_mat_q3_K_f32);
-    GGML_METAL_DEL_KERNEL(mul_mat_q4_K_f32);
-    GGML_METAL_DEL_KERNEL(mul_mat_q5_K_f32);
-    GGML_METAL_DEL_KERNEL(mul_mat_q6_K_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_f32_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_f16_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_1row);
+    GGML_METAL_DEL_KERNEL(mul_mv_f16_f32_l4);
+    GGML_METAL_DEL_KERNEL(mul_mv_q4_0_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_q4_1_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_q8_0_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_q2_K_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_q3_K_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_q4_K_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_q5_K_f32);
+    GGML_METAL_DEL_KERNEL(mul_mv_q6_K_f32);
+    if ([ctx->device supportsFamily:MTLGPUFamilyApple7]) {
     GGML_METAL_DEL_KERNEL(mul_mm_f32_f32);
     GGML_METAL_DEL_KERNEL(mul_mm_f16_f32);
     GGML_METAL_DEL_KERNEL(mul_mm_q4_0_f32);
@@ -369,12 +377,15 @@ void ggml_metal_free(struct ggml_metal_context * ctx) {
     GGML_METAL_DEL_KERNEL(mul_mm_q4_K_f32);
     GGML_METAL_DEL_KERNEL(mul_mm_q5_K_f32);
     GGML_METAL_DEL_KERNEL(mul_mm_q6_K_f32);
+    }
     GGML_METAL_DEL_KERNEL(rope_f32);
     GGML_METAL_DEL_KERNEL(rope_f16);
     GGML_METAL_DEL_KERNEL(alibi_f32);
     GGML_METAL_DEL_KERNEL(cpy_f32_f16);
     GGML_METAL_DEL_KERNEL(cpy_f32_f32);
     GGML_METAL_DEL_KERNEL(cpy_f16_f16);
+    GGML_METAL_DEL_KERNEL(concat);
+    GGML_METAL_DEL_KERNEL(sqr);
 
 #undef GGML_METAL_DEL_KERNEL
 
@@ -431,7 +442,7 @@ static id<MTLBuffer> ggml_metal_get_buffer(struct ggml_metal_context * ctx, stru
     for (int i = 0; i < ctx->n_buffers; ++i) {
         const int64_t ioffs = (int64_t) t->data - (int64_t) ctx->buffers[i].data;
 
-        //metal_printf("ioffs = %10ld, tsize = %10ld, sum = %10ld, ctx->buffers[%d].size = %10ld, name = %s\n", ioffs, tsize, ioffs + tsize, i, ctx->buffers[i].size, ctx->buffers[i].name);
+        //GGML_METAL_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, ctx->buffers[%d].size = %10ld, name = %s\n", ioffs, tsize, ioffs + tsize, i, ctx->buffers[i].size, ctx->buffers[i].name);
         if (ioffs >= 0 && ioffs + tsize <= (int64_t) ctx->buffers[i].size) {
             *offs = (size_t) ioffs;
 
@@ -766,6 +777,43 @@ void ggml_metal_graph_compute(
             {
                 // noop
             } break;
+        case GGML_OP_CONCAT:
+            {
+
+                int64_t nb = ne00;
+                [encoder setComputePipelineState:ctx->pipeline_concat];
+                [encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
+                [encoder setBuffer:id_src1 offset:offs_src1 atIndex:1];
+                [encoder setBuffer:id_dst offset:offs_dst atIndex:2];
+                [encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
+                [encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
+                [encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
+                [encoder setBytes:&ne03 length:sizeof(ne03) atIndex:6];
+                [encoder setBytes:&nb00 length:sizeof(nb00) atIndex:7];
+                [encoder setBytes:&nb01 length:sizeof(nb01) atIndex:8];
+                [encoder setBytes:&nb02 length:sizeof(nb02) atIndex:9];
+                [encoder setBytes:&nb03 length:sizeof(nb03) atIndex:10];
+                [encoder setBytes:&ne10 length:sizeof(ne10) atIndex:11];
+                [encoder setBytes:&ne11 length:sizeof(ne11) atIndex:12];
+                [encoder setBytes:&ne12 length:sizeof(ne12) atIndex:13];
+                [encoder setBytes:&ne13 length:sizeof(ne13) atIndex:14];
+                [encoder setBytes:&nb10 length:sizeof(nb10) atIndex:15];
+                [encoder setBytes:&nb11 length:sizeof(nb11) atIndex:16];
+                [encoder setBytes:&nb12 length:sizeof(nb12) atIndex:17];
+                [encoder setBytes:&nb13 length:sizeof(nb13) atIndex:18];
+                [encoder setBytes:&ne0 length:sizeof(ne0) atIndex:19];
+                [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:20];
+                [encoder setBytes:&ne2 length:sizeof(ne2) atIndex:21];
+                [encoder setBytes:&ne3 length:sizeof(ne3) atIndex:22];
+                [encoder setBytes:&nb0 length:sizeof(nb0) atIndex:23];
+                [encoder setBytes:&nb1 length:sizeof(nb1) atIndex:24];
+                [encoder setBytes:&nb2 length:sizeof(nb2) atIndex:25];
+                [encoder setBytes:&nb3 length:sizeof(nb3) atIndex:26];
+                [encoder setBytes:&nb length:sizeof(nb) atIndex:27];
+
+                const int nth = MIN(1024, ne0);
+                [encoder dispatchThreadgroups:MTLSizeMake(ne1, ne2, ne3) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
+            } break;
         case GGML_OP_ADD:
             {
                 GGML_ASSERT(ggml_is_contiguous(src0));
|
||||||
GGML_ASSERT(false);
|
GGML_ASSERT(false);
|
||||||
}
|
}
|
||||||
} break;
|
} break;
|
||||||
|
case GGML_OP_SQR:
|
||||||
|
{
|
||||||
|
GGML_ASSERT(ggml_is_contiguous(src0));
|
||||||
|
|
||||||
|
[encoder setComputePipelineState:ctx->pipeline_sqr];
|
||||||
|
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
|
||||||
|
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
|
||||||
|
|
||||||
|
const int64_t n = ggml_nelements(dst);
|
||||||
|
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
|
||||||
|
} break;
|
||||||
case GGML_OP_SOFT_MAX:
|
case GGML_OP_SOFT_MAX:
|
||||||
{
|
{
|
||||||
const int nth = MIN(32, ne00);
|
const int nth = MIN(32, ne00);
|
||||||
|
@@ -944,21 +1003,46 @@ void ggml_metal_graph_compute(
            } break;
        case GGML_OP_MUL_MAT:
            {
-               // TODO: needs to be updated after PR: https://github.com/ggerganov/ggml/pull/224
-
                GGML_ASSERT(ne00 == ne10);
-               // GGML_ASSERT(ne02 == ne12); // Should be checked on individual data types until broadcast is implemented everywhere
-               uint gqa = ne12/ne02;
                GGML_ASSERT(ne03 == ne13);
 
+               const uint gqa = ne12/ne02;
+
+               // find the break-even point where the matrix-matrix kernel becomes more efficient compared
+               // to the matrix-vector kernel
+               int ne11_mm_min = 1;
+
+#if 0
+               // the numbers below are measured on M2 Ultra for 7B and 13B models
+               // these numbers do not translate to other devices or model sizes
+               // TODO: need to find a better approach
+               if ([ctx->device.name isEqualToString:@"Apple M2 Ultra"]) {
+                   switch (src0t) {
+                       case GGML_TYPE_F16: ne11_mm_min = 2; break;
+                       case GGML_TYPE_Q8_0: ne11_mm_min = 7; break;
+                       case GGML_TYPE_Q2_K: ne11_mm_min = 15; break;
+                       case GGML_TYPE_Q3_K: ne11_mm_min = 7; break;
+                       case GGML_TYPE_Q4_0:
+                       case GGML_TYPE_Q4_1: ne11_mm_min = 15; break;
+                       case GGML_TYPE_Q4_K: ne11_mm_min = 11; break;
+                       case GGML_TYPE_Q5_0: // not tested yet
+                       case GGML_TYPE_Q5_1: ne11_mm_min = 13; break; // not tested yet
+                       case GGML_TYPE_Q5_K: ne11_mm_min = 7; break;
+                       case GGML_TYPE_Q6_K: ne11_mm_min = 7; break;
+                       default: ne11_mm_min = 1; break;
+                   }
+               }
+#endif
+
                // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
                // AMD GPU and older A-chips will reuse matrix-vector multiplication kernel
-               if (!ggml_is_transposed(src0) &&
+               if ([ctx->device supportsFamily:MTLGPUFamilyApple7] &&
+                   !ggml_is_transposed(src0) &&
                    !ggml_is_transposed(src1) &&
                    src1t == GGML_TYPE_F32 &&
-                   [ctx->device supportsFamily:MTLGPUFamilyApple7] &&
-                   ne00%32 == 0 &&
-                   ne11 > 2) {
+                   ne00 % 32 == 0 &&
+                   ne11 > ne11_mm_min) {
+                   //printf("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
                    switch (src0->type) {
                        case GGML_TYPE_F32: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f32_f32]; break;
                        case GGML_TYPE_F16: [encoder setComputePipelineState:ctx->pipeline_mul_mm_f16_f32]; break;
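The `ne11_mm_min` heuristic above decides, per operation, whether to dispatch the matrix-matrix kernel or fall back to the matrix-vector kernel. A small self-contained C++ sketch of that decision follows; the threshold values are the "Apple M2 Ultra" measurements quoted in the disabled `#if 0` block, so treat them as illustrative rather than portable, and note that the shipped code path keeps the default break-even of 1.

```cpp
#include <cstdio>
#include <map>
#include <string>

// Illustrative restatement of the dispatch rule: use the matrix-matrix (mm)
// kernel only when the batch dimension ne11 exceeds a per-type break-even
// point; otherwise the matrix-vector (mv) kernel is expected to be faster.
static bool use_mat_mat_kernel(const std::string & src0_type, int ne11, bool supports_apple7) {
    // break-even points quoted for "Apple M2 Ultra" in the disabled block above
    static const std::map<std::string, int> ne11_mm_min = {
        {"f16", 2}, {"q8_0", 7}, {"q2_K", 15}, {"q3_K", 7},
        {"q4_0", 15}, {"q4_1", 15}, {"q4_K", 11},
        {"q5_0", 13}, {"q5_1", 13}, {"q5_K", 7}, {"q6_K", 7},
    };
    const auto it = ne11_mm_min.find(src0_type);
    const int min_batch = (it != ne11_mm_min.end()) ? it->second : 1;
    return supports_apple7 && ne11 > min_batch;
}

int main() {
    std::printf("q4_K, ne11 = 8  -> %s\n", use_mat_mat_kernel("q4_K", 8,  true) ? "mm" : "mv");
    std::printf("q4_K, ne11 = 16 -> %s\n", use_mat_mat_kernel("q4_K", 16, true) ? "mm" : "mv");
    return 0;
}
```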
@@ -987,17 +1071,18 @@
                [encoder setBytes:&ne1 length:sizeof(ne1) atIndex:12];
                [encoder setBytes:&gqa length:sizeof(gqa) atIndex:13];
                [encoder setThreadgroupMemoryLength:8192 atIndex:0];
-               [encoder dispatchThreadgroups:MTLSizeMake( (ne11+31)/32, (ne01+63) / 64, ne12) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
+               [encoder dispatchThreadgroups:MTLSizeMake( (ne11 + 31)/32, (ne01 + 63)/64, ne12) threadsPerThreadgroup:MTLSizeMake(128, 1, 1)];
            } else {
                int nth0 = 32;
                int nth1 = 1;
                int nrows = 1;
+               //printf("vector: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
 
                // use custom matrix x vector kernel
                switch (src0t) {
                    case GGML_TYPE_F32:
                        {
-                           [encoder setComputePipelineState:ctx->pipeline_mul_mat_f32_f32];
+                           [encoder setComputePipelineState:ctx->pipeline_mul_mv_f32_f32];
                            nrows = 4;
                        } break;
                    case GGML_TYPE_F16:
@@ -1005,12 +1090,12 @@
                            nth0 = 32;
                            nth1 = 1;
                            if (ne11 * ne12 < 4) {
-                               [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32_1row];
+                               [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32_1row];
                            } else if (ne00 >= 128 && ne01 >= 8 && ne00%4 == 0) {
-                               [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32_l4];
+                               [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32_l4];
                                nrows = ne11;
                            } else {
-                               [encoder setComputePipelineState:ctx->pipeline_mul_mat_f16_f32];
+                               [encoder setComputePipelineState:ctx->pipeline_mul_mv_f16_f32];
                                nrows = 4;
                            }
                        } break;
@ -1021,7 +1106,7 @@ void ggml_metal_graph_compute(
|
||||||
|
|
||||||
nth0 = 8;
|
nth0 = 8;
|
||||||
nth1 = 8;
|
nth1 = 8;
|
||||||
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_0_f32];
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_q4_0_f32];
|
||||||
} break;
|
} break;
|
||||||
case GGML_TYPE_Q4_1:
|
case GGML_TYPE_Q4_1:
|
||||||
{
|
{
|
||||||
|
@ -1030,7 +1115,7 @@ void ggml_metal_graph_compute(
|
||||||
|
|
||||||
nth0 = 8;
|
nth0 = 8;
|
||||||
nth1 = 8;
|
nth1 = 8;
|
||||||
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_1_f32];
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_q4_1_f32];
|
||||||
} break;
|
} break;
|
||||||
case GGML_TYPE_Q8_0:
|
case GGML_TYPE_Q8_0:
|
||||||
{
|
{
|
||||||
|
@ -1039,7 +1124,7 @@ void ggml_metal_graph_compute(
|
||||||
|
|
||||||
nth0 = 8;
|
nth0 = 8;
|
||||||
nth1 = 8;
|
nth1 = 8;
|
||||||
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q8_0_f32];
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_q8_0_f32];
|
||||||
} break;
|
} break;
|
||||||
case GGML_TYPE_Q2_K:
|
case GGML_TYPE_Q2_K:
|
||||||
{
|
{
|
||||||
|
@ -1048,7 +1133,7 @@ void ggml_metal_graph_compute(
|
||||||
|
|
||||||
nth0 = 2;
|
nth0 = 2;
|
||||||
nth1 = 32;
|
nth1 = 32;
|
||||||
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q2_K_f32];
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_q2_K_f32];
|
||||||
} break;
|
} break;
|
||||||
case GGML_TYPE_Q3_K:
|
case GGML_TYPE_Q3_K:
|
||||||
{
|
{
|
||||||
|
@ -1057,7 +1142,7 @@ void ggml_metal_graph_compute(
|
||||||
|
|
||||||
nth0 = 2;
|
nth0 = 2;
|
||||||
nth1 = 32;
|
nth1 = 32;
|
||||||
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q3_K_f32];
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_q3_K_f32];
|
||||||
} break;
|
} break;
|
||||||
case GGML_TYPE_Q4_K:
|
case GGML_TYPE_Q4_K:
|
||||||
{
|
{
|
||||||
|
@ -1066,7 +1151,7 @@ void ggml_metal_graph_compute(
|
||||||
|
|
||||||
nth0 = 4; //1;
|
nth0 = 4; //1;
|
||||||
nth1 = 8; //32;
|
nth1 = 8; //32;
|
||||||
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q4_K_f32];
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_q4_K_f32];
|
||||||
} break;
|
} break;
|
||||||
case GGML_TYPE_Q5_K:
|
case GGML_TYPE_Q5_K:
|
||||||
{
|
{
|
||||||
|
@ -1075,7 +1160,7 @@ void ggml_metal_graph_compute(
|
||||||
|
|
||||||
nth0 = 2;
|
nth0 = 2;
|
||||||
nth1 = 32;
|
nth1 = 32;
|
||||||
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q5_K_f32];
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_q5_K_f32];
|
||||||
} break;
|
} break;
|
||||||
case GGML_TYPE_Q6_K:
|
case GGML_TYPE_Q6_K:
|
||||||
{
|
{
|
||||||
|
@ -1084,7 +1169,7 @@ void ggml_metal_graph_compute(
|
||||||
|
|
||||||
nth0 = 2;
|
nth0 = 2;
|
||||||
nth1 = 32;
|
nth1 = 32;
|
||||||
[encoder setComputePipelineState:ctx->pipeline_mul_mat_q6_K_f32];
|
[encoder setComputePipelineState:ctx->pipeline_mul_mv_q6_K_f32];
|
||||||
} break;
|
} break;
|
||||||
default:
|
default:
|
||||||
{
|
{
|
||||||
|
@ -1113,7 +1198,7 @@ void ggml_metal_graph_compute(
             [encoder setBytes:&gqa length:sizeof(gqa) atIndex:17];

             if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q8_0 ||
-                src0t == GGML_TYPE_Q2_K) {// || src0t == GGML_TYPE_Q4_K) {
+                src0t == GGML_TYPE_Q2_K) { // || src0t == GGML_TYPE_Q4_K) {
                 [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
             }
             else if (src0t == GGML_TYPE_Q4_K) {
ggml-metal.metal (139 changed lines)
@ -132,6 +132,13 @@ kernel void kernel_relu(
     dst[tpig] = max(0.0f, src0[tpig]);
 }

+kernel void kernel_sqr(
+        device const float * src0,
+        device       float * dst,
+        uint tpig[[thread_position_in_grid]]) {
+    dst[tpig] = src0[tpig] * src0[tpig];
+}
+
 constant float GELU_COEF_A     = 0.044715f;
 constant float SQRT_2_OVER_PI  = 0.79788456080286535587989211986876f;
@ -428,18 +435,23 @@ void mul_vec_q_n_f32(device const void * src0, device const float * src1, device
     int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne10, int64_t ne12, int64_t ne0, int64_t ne1, uint gqa,
     uint3 tgpig, uint tiisg, uint sgitg) {
     const int nb = ne00/QK4_0;

     const int r0 = tgpig.x;
     const int r1 = tgpig.y;
     const int im = tgpig.z;

     const int first_row = (r0 * nsg + sgitg) * nr;

     const uint offset0 = first_row * nb + im/gqa*(nb*ne0);

     device const block_q_type * x = (device const block_q_type *) src0 + offset0;
     device const float        * y = (device const float        *) src1 + r1*ne10 + im*ne00*ne1;
-    float yl[16];       // src1 vector cache
-    float sumf[nr]={0.f};
-
-    const int ix = tiisg/2;
-    const int il = 8*(tiisg%2);
+
+    float yl[16];       // src1 vector cache
+    float sumf[nr] = {0.f};
+
+    const int ix = (tiisg/2);
+    const int il = (tiisg%2)*8;

     device const float * yb = y + ix * QK4_0 + il;
@ -450,6 +462,7 @@ void mul_vec_q_n_f32(device const void * src0, device const float * src1, device
|
||||||
sumy += yb[i] + yb[i+1];
|
sumy += yb[i] + yb[i+1];
|
||||||
yl[i+0] = yb[i+ 0];
|
yl[i+0] = yb[i+ 0];
|
||||||
yl[i+1] = yb[i+ 1]/256.f;
|
yl[i+1] = yb[i+ 1]/256.f;
|
||||||
|
|
||||||
sumy += yb[i+16] + yb[i+17];
|
sumy += yb[i+16] + yb[i+17];
|
||||||
yl[i+8] = yb[i+16]/16.f;
|
yl[i+8] = yb[i+16]/16.f;
|
||||||
yl[i+9] = yb[i+17]/4096.f;
|
yl[i+9] = yb[i+17]/4096.f;
|
||||||
|
@ -465,12 +478,12 @@ void mul_vec_q_n_f32(device const void * src0, device const float * src1, device
|
||||||
for (int row = 0; row < nr; ++row) {
|
for (int row = 0; row < nr; ++row) {
|
||||||
const float tot = simd_sum(sumf[row]);
|
const float tot = simd_sum(sumf[row]);
|
||||||
if (tiisg == 0 && first_row + row < ne01) {
|
if (tiisg == 0 && first_row + row < ne01) {
|
||||||
dst[r1*ne0 + im*ne0*ne1 + first_row + row] = tot;
|
dst[im*ne0*ne1 + r1*ne0 + first_row + row] = tot;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
kernel void kernel_mul_mat_q4_0_f32(
|
kernel void kernel_mul_mv_q4_0_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -488,7 +501,7 @@ kernel void kernel_mul_mat_q4_0_f32(
|
||||||
mul_vec_q_n_f32<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,gqa,tgpig,tiisg,sgitg);
|
mul_vec_q_n_f32<block_q4_0, N_DST, N_SIMDGROUP, N_SIMDWIDTH>(src0,src1,dst,ne00,ne01,ne02,ne10,ne12,ne0,ne1,gqa,tgpig,tiisg,sgitg);
|
||||||
}
|
}
|
||||||
|
|
||||||
kernel void kernel_mul_mat_q4_1_f32(
|
kernel void kernel_mul_mv_q4_1_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -508,7 +521,7 @@ kernel void kernel_mul_mat_q4_1_f32(
|
||||||
|
|
||||||
#define NB_Q8_0 8
|
#define NB_Q8_0 8
|
||||||
|
|
||||||
kernel void kernel_mul_mat_q8_0_f32(
|
kernel void kernel_mul_mv_q8_0_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -572,7 +585,7 @@ kernel void kernel_mul_mat_q8_0_f32(
|
||||||
|
|
||||||
#define N_F32_F32 4
|
#define N_F32_F32 4
|
||||||
|
|
||||||
kernel void kernel_mul_mat_f32_f32(
|
kernel void kernel_mul_mv_f32_f32(
|
||||||
device const char * src0,
|
device const char * src0,
|
||||||
device const char * src1,
|
device const char * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -643,7 +656,7 @@ kernel void kernel_mul_mat_f32_f32(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
kernel void kernel_mul_mat_f16_f32_1row(
|
kernel void kernel_mul_mv_f16_f32_1row(
|
||||||
device const char * src0,
|
device const char * src0,
|
||||||
device const char * src1,
|
device const char * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -697,7 +710,7 @@ kernel void kernel_mul_mat_f16_f32_1row(
|
||||||
|
|
||||||
#define N_F16_F32 4
|
#define N_F16_F32 4
|
||||||
|
|
||||||
kernel void kernel_mul_mat_f16_f32(
|
kernel void kernel_mul_mv_f16_f32(
|
||||||
device const char * src0,
|
device const char * src0,
|
||||||
device const char * src1,
|
device const char * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -769,7 +782,7 @@ kernel void kernel_mul_mat_f16_f32(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Assumes row size (ne00) is a multiple of 4
|
// Assumes row size (ne00) is a multiple of 4
|
||||||
kernel void kernel_mul_mat_f16_f32_l4(
|
kernel void kernel_mul_mv_f16_f32_l4(
|
||||||
device const char * src0,
|
device const char * src0,
|
||||||
device const char * src1,
|
device const char * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -1098,6 +1111,62 @@ kernel void kernel_cpy_f32_f32(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
kernel void kernel_concat(
|
||||||
|
device const char * src0,
|
||||||
|
device const char * src1,
|
||||||
|
device char * dst,
|
||||||
|
constant int64_t & ne00,
|
||||||
|
constant int64_t & ne01,
|
||||||
|
constant int64_t & ne02,
|
||||||
|
constant int64_t & ne03,
|
||||||
|
constant uint64_t & nb00,
|
||||||
|
constant uint64_t & nb01,
|
||||||
|
constant uint64_t & nb02,
|
||||||
|
constant uint64_t & nb03,
|
||||||
|
constant int64_t & ne10,
|
||||||
|
constant int64_t & ne11,
|
||||||
|
constant int64_t & ne12,
|
||||||
|
constant int64_t & ne13,
|
||||||
|
constant uint64_t & nb10,
|
||||||
|
constant uint64_t & nb11,
|
||||||
|
constant uint64_t & nb12,
|
||||||
|
constant uint64_t & nb13,
|
||||||
|
constant int64_t & ne0,
|
||||||
|
constant int64_t & ne1,
|
||||||
|
constant int64_t & ne2,
|
||||||
|
constant int64_t & ne3,
|
||||||
|
constant uint64_t & nb0,
|
||||||
|
constant uint64_t & nb1,
|
||||||
|
constant uint64_t & nb2,
|
||||||
|
constant uint64_t & nb3,
|
||||||
|
uint3 tgpig[[threadgroup_position_in_grid]],
|
||||||
|
uint3 tpitg[[thread_position_in_threadgroup]],
|
||||||
|
uint3 ntg[[threads_per_threadgroup]]) {
|
||||||
|
|
||||||
|
const int64_t i03 = tgpig.z;
|
||||||
|
const int64_t i02 = tgpig.y;
|
||||||
|
const int64_t i01 = tgpig.x;
|
||||||
|
|
||||||
|
const int64_t i13 = i03 % ne13;
|
||||||
|
const int64_t i12 = i02 % ne12;
|
||||||
|
const int64_t i11 = i01 % ne11;
|
||||||
|
|
||||||
|
device const char * src0_ptr = src0 + i03 * nb03 + i02 * nb02 + i01 * nb01 + tpitg.x*nb00;
|
||||||
|
device const char * src1_ptr = src1 + i13*nb13 + i12*nb12 + i11*nb11 + tpitg.x*nb10;
|
||||||
|
device char * dst_ptr = dst + i03*nb3 + i02*nb2 + i01*nb1 + tpitg.x*nb0;
|
||||||
|
|
||||||
|
for (int i0 = tpitg.x; i0 < ne0; i0 += ntg.x) {
|
||||||
|
if (i02 < ne02) {
|
||||||
|
((device float *)dst_ptr)[0] = ((device float *)src0_ptr)[0];
|
||||||
|
src0_ptr += ntg.x*nb00;
|
||||||
|
} else {
|
||||||
|
((device float *)dst_ptr)[0] = ((device float *)src1_ptr)[0];
|
||||||
|
src1_ptr += ntg.x*nb10;
|
||||||
|
}
|
||||||
|
dst_ptr += ntg.x*nb0;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//============================================ k-quants ======================================================
|
//============================================ k-quants ======================================================
|
||||||
|
|
||||||
#ifndef QK_K
|
#ifndef QK_K
|
||||||
|
@ -1190,7 +1259,7 @@ static inline uchar4 get_scale_min_k4(int j, device const uint8_t * q) {
|
||||||
|
|
||||||
//====================================== dot products =========================
|
//====================================== dot products =========================
|
||||||
|
|
||||||
kernel void kernel_mul_mat_q2_K_f32(
|
kernel void kernel_mul_mv_q2_K_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -1334,7 +1403,7 @@ kernel void kernel_mul_mat_q2_K_f32(
|
||||||
}
|
}
|
||||||
|
|
||||||
#if QK_K == 256
|
#if QK_K == 256
|
||||||
kernel void kernel_mul_mat_q3_K_f32(
|
kernel void kernel_mul_mv_q3_K_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -1486,7 +1555,7 @@ kernel void kernel_mul_mat_q3_K_f32(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
kernel void kernel_mul_mat_q3_K_f32(
|
kernel void kernel_mul_mv_q3_K_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -1557,7 +1626,7 @@ kernel void kernel_mul_mat_q3_K_f32(
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#if QK_K == 256
|
#if QK_K == 256
|
||||||
kernel void kernel_mul_mat_q4_K_f32(
|
kernel void kernel_mul_mv_q4_K_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -1663,7 +1732,7 @@ kernel void kernel_mul_mat_q4_K_f32(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#else
|
#else
|
||||||
kernel void kernel_mul_mat_q4_K_f32(
|
kernel void kernel_mul_mv_q4_K_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -1752,7 +1821,7 @@ kernel void kernel_mul_mat_q4_K_f32(
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
kernel void kernel_mul_mat_q5_K_f32(
|
kernel void kernel_mul_mv_q5_K_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -1925,7 +1994,7 @@ kernel void kernel_mul_mat_q5_K_f32(
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
kernel void kernel_mul_mat_q6_K_f32(
|
kernel void kernel_mul_mv_q6_K_f32(
|
||||||
device const void * src0,
|
device const void * src0,
|
||||||
device const float * src1,
|
device const float * src1,
|
||||||
device float * dst,
|
device float * dst,
|
||||||
|
@ -2263,7 +2332,7 @@ kernel void kernel_get_rows(
|
||||||
}
|
}
|
||||||
|
|
||||||
 #define BLOCK_SIZE_M 64 // 8 simdgroup matrices from matrix A
-#define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix A
+#define BLOCK_SIZE_N 32 // 4 simdgroup matrices from matrix B
 #define BLOCK_SIZE_K 32
 #define THREAD_MAT_M 4 // each thread take 4 simdgroup matrices from matrix A
 #define THREAD_MAT_N 2 // each thread take 2 simdgroup matrices from matrix B
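To make the tile constants above concrete, here is a small sanity check of the arithmetic, under the assumption of 8x8 simdgroup matrices and four simdgroups (128 threads) per threadgroup; each simdgroup would then own a 32x16 quadrant of the 64x32 output tile:

#include <cassert>

int main() {
    // Assumed: 8x8 simdgroup matrices, 4 simdgroups per threadgroup.
    const int SG_MAT_DIM   = 8;
    const int BLOCK_SIZE_M = 64;  // output rows per threadgroup
    const int BLOCK_SIZE_N = 32;  // output columns per threadgroup
    const int THREAD_MAT_M = 4;   // 8x8 tiles per simdgroup along M
    const int THREAD_MAT_N = 2;   // 8x8 tiles per simdgroup along N

    const int sg_rows = THREAD_MAT_M * SG_MAT_DIM;  // 32 rows per simdgroup
    const int sg_cols = THREAD_MAT_N * SG_MAT_DIM;  // 16 columns per simdgroup

    // Two simdgroups side by side in each direction cover the full 64x32 tile.
    assert(2 * sg_rows == BLOCK_SIZE_M);
    assert(2 * sg_cols == BLOCK_SIZE_N);
    return 0;
}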
@ -2300,9 +2369,11 @@ kernel void kernel_mul_mm(device const uchar * src0,
|
||||||
const uint r0 = tgpig.y;
|
const uint r0 = tgpig.y;
|
||||||
const uint r1 = tgpig.x;
|
const uint r1 = tgpig.x;
|
||||||
const uint im = tgpig.z;
|
const uint im = tgpig.z;
|
||||||
|
|
||||||
// if this block is of 64x32 shape or smaller
|
// if this block is of 64x32 shape or smaller
|
||||||
short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M;
|
short n_rows = (ne0 - r0 * BLOCK_SIZE_M < BLOCK_SIZE_M) ? (ne0 - r0 * BLOCK_SIZE_M) : BLOCK_SIZE_M;
|
||||||
short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N;
|
short n_cols = (ne1 - r1 * BLOCK_SIZE_N < BLOCK_SIZE_N) ? (ne1 - r1 * BLOCK_SIZE_N) : BLOCK_SIZE_N;
|
||||||
|
|
||||||
// a thread shouldn't load data outside of the matrix
|
// a thread shouldn't load data outside of the matrix
|
||||||
short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
|
short thread_row = ((short)tiitg/THREAD_PER_ROW) < n_rows ? ((short)tiitg/THREAD_PER_ROW) : n_rows - 1;
|
||||||
short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;
|
short thread_col = ((short)tiitg/THREAD_PER_COL) < n_cols ? ((short)tiitg/THREAD_PER_COL) : n_cols - 1;
|
||||||
|
@ -2326,26 +2397,30 @@ kernel void kernel_mul_mm(device const uchar * src0,
|
||||||
+ nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL)));
|
+ nb10 * (BLOCK_SIZE_K / THREAD_PER_COL * (tiitg % THREAD_PER_COL)));
|
||||||
|
|
||||||
for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) {
|
for (int loop_k = 0; loop_k < ne00; loop_k += BLOCK_SIZE_K) {
|
||||||
//load data and store to threadgroup memory
|
// load data and store to threadgroup memory
|
||||||
half4x4 temp_a;
|
half4x4 temp_a;
|
||||||
dequantize_func(x, il, temp_a);
|
dequantize_func(x, il, temp_a);
|
||||||
threadgroup_barrier(mem_flags::mem_threadgroup);
|
threadgroup_barrier(mem_flags::mem_threadgroup);
|
||||||
|
|
||||||
#pragma unroll(16)
|
#pragma unroll(16)
|
||||||
for (int i = 0; i < 16; i++) {
|
for (int i = 0; i < 16; i++) {
|
||||||
*(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \
|
*(sa + SG_MAT_SIZE * ((tiitg / THREAD_PER_ROW / 8) \
|
||||||
+ 16 * (tiitg % THREAD_PER_ROW) + 8 * (i / 8)) \
|
+ (tiitg % THREAD_PER_ROW) * 16 + (i / 8) * 8) \
|
||||||
+ (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4];
|
+ (tiitg / THREAD_PER_ROW) % 8 + (i & 7) * 8) = temp_a[i/4][i%4];
|
||||||
}
|
}
|
||||||
*(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) \
|
|
||||||
= *((device float2x4 *)y);
|
*(threadgroup float2x4 *)(sb + (tiitg % THREAD_PER_COL) * 8 * 32 + 8 * (tiitg / THREAD_PER_COL)) = *((device float2x4 *)y);
|
||||||
|
|
||||||
il = (il + 2 < nl) ? il + 2 : il % 2;
|
il = (il + 2 < nl) ? il + 2 : il % 2;
|
||||||
x = (il < 2) ? x + (2+nl-1)/nl : x;
|
x = (il < 2) ? x + (2+nl-1)/nl : x;
|
||||||
y += BLOCK_SIZE_K;
|
y += BLOCK_SIZE_K;
|
||||||
|
|
||||||
threadgroup_barrier(mem_flags::mem_threadgroup);
|
threadgroup_barrier(mem_flags::mem_threadgroup);
|
||||||
//load matrices from threadgroup memory and conduct outer products
|
|
||||||
|
// load matrices from threadgroup memory and conduct outer products
|
||||||
threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2));
|
threadgroup half * lsma = (sa + THREAD_MAT_M * SG_MAT_SIZE * (sgitg % 2));
|
||||||
threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2));
|
threadgroup float * lsmb = (sb + THREAD_MAT_N * SG_MAT_SIZE * (sgitg / 2));
|
||||||
|
|
||||||
#pragma unroll(4)
|
#pragma unroll(4)
|
||||||
for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) {
|
for (int ik = 0; ik < BLOCK_SIZE_K / 8; ik++) {
|
||||||
#pragma unroll(4)
|
#pragma unroll(4)
|
||||||
|
@ -2360,6 +2435,7 @@ kernel void kernel_mul_mm(device const uchar * src0,
|
||||||
|
|
||||||
lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE;
|
lsma += BLOCK_SIZE_M / SG_MAT_ROW * SG_MAT_SIZE;
|
||||||
lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE;
|
lsmb += BLOCK_SIZE_N / SG_MAT_ROW * SG_MAT_SIZE;
|
||||||
|
|
||||||
#pragma unroll(8)
|
#pragma unroll(8)
|
||||||
for (int i = 0; i < 8; i++){
|
for (int i = 0; i < 8; i++){
|
||||||
simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]);
|
simdgroup_multiply_accumulate(c_res[i], mb[i/4], ma[i%4], c_res[i]);
|
||||||
|
@ -2368,25 +2444,26 @@ kernel void kernel_mul_mm(device const uchar * src0,
|
||||||
}
|
}
|
||||||
|
|
||||||
if ((r0 + 1) * BLOCK_SIZE_M <= ne0 && (r1 + 1) * BLOCK_SIZE_N <= ne1) {
|
if ((r0 + 1) * BLOCK_SIZE_M <= ne0 && (r1 + 1) * BLOCK_SIZE_N <= ne1) {
|
||||||
device float *C = dst + BLOCK_SIZE_M * r0 + 32 * (sgitg&1) \
|
device float * C = dst + (BLOCK_SIZE_M * r0 + 32 * (sgitg & 1)) \
|
||||||
+ (BLOCK_SIZE_N * r1 + 16 * (sgitg>>1)) * ne0 + im*ne1*ne0;
|
+ (BLOCK_SIZE_N * r1 + 16 * (sgitg >> 1)) * ne0 + im*ne1*ne0;
|
||||||
for (int i = 0; i < 8; i++) {
|
for (int i = 0; i < 8; i++) {
|
||||||
simdgroup_store(c_res[i], C + 8 * (i%4) + 8 * ne0 * (i/4), ne0);
|
simdgroup_store(c_res[i], C + 8 * (i%4) + 8 * ne0 * (i/4), ne0);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
// block is smaller than 64x32, we should avoid writing data outside of the matrix
|
// block is smaller than 64x32, we should avoid writing data outside of the matrix
|
||||||
threadgroup_barrier(mem_flags::mem_threadgroup);
|
threadgroup_barrier(mem_flags::mem_threadgroup);
|
||||||
threadgroup float *temp_str = ((threadgroup float *)shared_memory) \
|
threadgroup float * temp_str = ((threadgroup float *)shared_memory) \
|
||||||
+ 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M;
|
+ 32 * (sgitg&1) + (16 * (sgitg>>1)) * BLOCK_SIZE_M;
|
||||||
for (int i = 0; i < 8; i++) {
|
for (int i = 0; i < 8; i++) {
|
||||||
simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M);
|
simdgroup_store(c_res[i], temp_str + 8 * (i%4) + 8 * BLOCK_SIZE_M * (i/4), BLOCK_SIZE_M);
|
||||||
}
|
}
|
||||||
|
|
||||||
threadgroup_barrier(mem_flags::mem_threadgroup);
|
threadgroup_barrier(mem_flags::mem_threadgroup);
|
||||||
device float *C = dst + BLOCK_SIZE_M * r0 + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0;
|
|
||||||
if (sgitg==0) {
|
device float * C = dst + (BLOCK_SIZE_M * r0) + (BLOCK_SIZE_N * r1) * ne0 + im*ne1*ne0;
|
||||||
|
if (sgitg == 0) {
|
||||||
for (int i = 0; i < n_rows; i++) {
|
for (int i = 0; i < n_rows; i++) {
|
||||||
for (int j = tiitg; j< n_cols; j += BLOCK_SIZE_N) {
|
for (int j = tiitg; j < n_cols; j += BLOCK_SIZE_N) {
|
||||||
*(C + i + j * ne0) = *(temp_str + i + j * BLOCK_SIZE_M);
|
*(C + i + j * ne0) = *(temp_str + i + j * BLOCK_SIZE_M);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
@ -69,4 +69,3 @@ python -m twine upload dist/*
 ## TODO
 - [ ] Add tests
 - [ ] Include conversion scripts as command line entry points in this package.
-- Add CI workflow for releasing the package.
@ -85,6 +85,7 @@ class MODEL_ARCH(IntEnum):
|
||||||
GPTNEOX : int = auto()
|
GPTNEOX : int = auto()
|
||||||
MPT : int = auto()
|
MPT : int = auto()
|
||||||
STARCODER : int = auto()
|
STARCODER : int = auto()
|
||||||
|
PERSIMMON : int = auto()
|
||||||
REFACT : int = auto()
|
REFACT : int = auto()
|
||||||
BERT : int = auto()
|
BERT : int = auto()
|
||||||
|
|
||||||
|
@ -108,6 +109,8 @@ class MODEL_TENSOR(IntEnum):
|
||||||
FFN_DOWN : int = auto()
|
FFN_DOWN : int = auto()
|
||||||
FFN_UP : int = auto()
|
FFN_UP : int = auto()
|
||||||
FFN_NORM : int = auto()
|
FFN_NORM : int = auto()
|
||||||
|
ATTN_Q_NORM : int = auto()
|
||||||
|
ATTN_K_NORM : int = auto()
|
||||||
|
|
||||||
|
|
||||||
MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
|
MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
|
||||||
|
@ -119,6 +122,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
|
||||||
MODEL_ARCH.GPTNEOX: "gptneox",
|
MODEL_ARCH.GPTNEOX: "gptneox",
|
||||||
MODEL_ARCH.MPT: "mpt",
|
MODEL_ARCH.MPT: "mpt",
|
||||||
MODEL_ARCH.STARCODER: "starcoder",
|
MODEL_ARCH.STARCODER: "starcoder",
|
||||||
|
MODEL_ARCH.PERSIMMON: "persimmon",
|
||||||
MODEL_ARCH.REFACT: "refact",
|
MODEL_ARCH.REFACT: "refact",
|
||||||
MODEL_ARCH.BERT: "bert",
|
MODEL_ARCH.BERT: "bert",
|
||||||
}
|
}
|
||||||
|
@ -130,7 +134,6 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
|
||||||
MODEL_TENSOR.OUTPUT_NORM: "output_norm",
|
MODEL_TENSOR.OUTPUT_NORM: "output_norm",
|
||||||
MODEL_TENSOR.OUTPUT: "output",
|
MODEL_TENSOR.OUTPUT: "output",
|
||||||
MODEL_TENSOR.ROPE_FREQS: "rope_freqs",
|
MODEL_TENSOR.ROPE_FREQS: "rope_freqs",
|
||||||
|
|
||||||
MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
|
MODEL_TENSOR.ATTN_NORM: "blk.{bid}.attn_norm",
|
||||||
MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
|
MODEL_TENSOR.ATTN_NORM_2: "blk.{bid}.attn_norm_2",
|
||||||
MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
|
MODEL_TENSOR.ATTN_QKV: "blk.{bid}.attn_qkv",
|
||||||
|
@ -139,6 +142,8 @@ TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
|
||||||
MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v",
|
MODEL_TENSOR.ATTN_V: "blk.{bid}.attn_v",
|
||||||
MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
|
MODEL_TENSOR.ATTN_OUT: "blk.{bid}.attn_output",
|
||||||
MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd",
|
MODEL_TENSOR.ATTN_ROT_EMBD: "blk.{bid}.attn_rot_embd",
|
||||||
|
MODEL_TENSOR.ATTN_Q_NORM: "blk.{bid}.attn_q_norm",
|
||||||
|
MODEL_TENSOR.ATTN_K_NORM: "blk.{bid}.attn_k_norm",
|
||||||
MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
|
MODEL_TENSOR.FFN_NORM: "blk.{bid}.ffn_norm",
|
||||||
MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate",
|
MODEL_TENSOR.FFN_GATE: "blk.{bid}.ffn_gate",
|
||||||
MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
|
MODEL_TENSOR.FFN_DOWN: "blk.{bid}.ffn_down",
|
||||||
|
@ -249,6 +254,20 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
|
||||||
MODEL_TENSOR.FFN_DOWN,
|
MODEL_TENSOR.FFN_DOWN,
|
||||||
MODEL_TENSOR.FFN_UP,
|
MODEL_TENSOR.FFN_UP,
|
||||||
],
|
],
|
||||||
|
MODEL_ARCH.PERSIMMON: [
|
||||||
|
MODEL_TENSOR.TOKEN_EMBD,
|
||||||
|
MODEL_TENSOR.OUTPUT,
|
||||||
|
MODEL_TENSOR.OUTPUT_NORM,
|
||||||
|
MODEL_TENSOR.ATTN_NORM,
|
||||||
|
MODEL_TENSOR.ATTN_QKV,
|
||||||
|
MODEL_TENSOR.ATTN_OUT,
|
||||||
|
MODEL_TENSOR.FFN_NORM,
|
||||||
|
MODEL_TENSOR.FFN_DOWN,
|
||||||
|
MODEL_TENSOR.FFN_UP,
|
||||||
|
MODEL_TENSOR.ATTN_Q_NORM,
|
||||||
|
MODEL_TENSOR.ATTN_K_NORM,
|
||||||
|
MODEL_TENSOR.ATTN_ROT_EMBD,
|
||||||
|
],
|
||||||
MODEL_ARCH.REFACT: [
|
MODEL_ARCH.REFACT: [
|
||||||
MODEL_TENSOR.TOKEN_EMBD,
|
MODEL_TENSOR.TOKEN_EMBD,
|
||||||
MODEL_TENSOR.OUTPUT_NORM,
|
MODEL_TENSOR.OUTPUT_NORM,
|
||||||
|
@ -279,6 +298,9 @@ MODEL_TENSOR_SKIP: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
|
||||||
MODEL_TENSOR.ROPE_FREQS,
|
MODEL_TENSOR.ROPE_FREQS,
|
||||||
MODEL_TENSOR.ATTN_ROT_EMBD,
|
MODEL_TENSOR.ATTN_ROT_EMBD,
|
||||||
],
|
],
|
||||||
|
MODEL_ARCH.PERSIMMON: [
|
||||||
|
MODEL_TENSOR.ROPE_FREQS,
|
||||||
|
]
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
@ -292,6 +314,7 @@ class TensorNameMap:
|
||||||
"model.embed_tokens", # llama-hf
|
"model.embed_tokens", # llama-hf
|
||||||
"tok_embeddings", # llama-pth
|
"tok_embeddings", # llama-pth
|
||||||
"embeddings.word_embeddings", # bert
|
"embeddings.word_embeddings", # bert
|
||||||
|
"language_model.embedding.word_embeddings", # persimmon
|
||||||
),
|
),
|
||||||
|
|
||||||
# Token type embeddings
|
# Token type embeddings
|
||||||
|
@ -308,8 +331,9 @@ class TensorNameMap:
|
||||||
# Output
|
# Output
|
||||||
MODEL_TENSOR.OUTPUT: (
|
MODEL_TENSOR.OUTPUT: (
|
||||||
"embed_out", # gptneox
|
"embed_out", # gptneox
|
||||||
"lm_head", # gpt2 gpt-j mpt falcon llama-hf baichuan
|
"lm_head", # gpt2 mpt falcon llama-hf baichuan
|
||||||
"output", # llama-pth
|
"output", # llama-pth
|
||||||
|
"word_embeddings_for_head", # persimmon
|
||||||
),
|
),
|
||||||
|
|
||||||
# Output norm
|
# Output norm
|
||||||
|
@ -321,6 +345,7 @@ class TensorNameMap:
|
||||||
"embeddings.LayerNorm", # bert
|
"embeddings.LayerNorm", # bert
|
||||||
"transformer.norm_f", # mpt
|
"transformer.norm_f", # mpt
|
||||||
"ln_f", # refact
|
"ln_f", # refact
|
||||||
|
"language_model.encoder.final_layernorm", # persimmon
|
||||||
),
|
),
|
||||||
|
|
||||||
# Rope frequencies
|
# Rope frequencies
|
||||||
|
@ -340,6 +365,7 @@ class TensorNameMap:
|
||||||
"model.layers.{bid}.input_layernorm", # llama-hf
|
"model.layers.{bid}.input_layernorm", # llama-hf
|
||||||
"layers.{bid}.attention_norm", # llama-pth
|
"layers.{bid}.attention_norm", # llama-pth
|
||||||
"encoder.layer.{bid}.attention.output.LayerNorm", # bert
|
"encoder.layer.{bid}.attention.output.LayerNorm", # bert
|
||||||
|
"language_model.encoder.layers.{bid}.input_layernorm", # persimmon
|
||||||
),
|
),
|
||||||
|
|
||||||
# Attention norm 2
|
# Attention norm 2
|
||||||
|
@ -353,6 +379,7 @@ class TensorNameMap:
|
||||||
"transformer.h.{bid}.attn.c_attn", # gpt2
|
"transformer.h.{bid}.attn.c_attn", # gpt2
|
||||||
"transformer.blocks.{bid}.attn.Wqkv", # mpt
|
"transformer.blocks.{bid}.attn.Wqkv", # mpt
|
||||||
"transformer.h.{bid}.self_attention.query_key_value", # falcon
|
"transformer.h.{bid}.self_attention.query_key_value", # falcon
|
||||||
|
"language_model.encoder.layers.{bid}.self_attention.query_key_value", # persimmon
|
||||||
),
|
),
|
||||||
|
|
||||||
# Attention query
|
# Attention query
|
||||||
|
@ -389,6 +416,7 @@ class TensorNameMap:
|
||||||
"layers.{bid}.attention.wo", # llama-pth
|
"layers.{bid}.attention.wo", # llama-pth
|
||||||
"encoder.layer.{bid}.attention.output.dense", # bert
|
"encoder.layer.{bid}.attention.output.dense", # bert
|
||||||
"transformer.h.{bid}.attn.out_proj", # gpt-j
|
"transformer.h.{bid}.attn.out_proj", # gpt-j
|
||||||
|
"language_model.encoder.layers.{bid}.self_attention.dense" # persimmon
|
||||||
),
|
),
|
||||||
|
|
||||||
# Rotary embeddings
|
# Rotary embeddings
|
||||||
|
@ -405,6 +433,7 @@ class TensorNameMap:
|
||||||
"model.layers.{bid}.post_attention_layernorm", # llama-hf
|
"model.layers.{bid}.post_attention_layernorm", # llama-hf
|
||||||
"layers.{bid}.ffn_norm", # llama-pth
|
"layers.{bid}.ffn_norm", # llama-pth
|
||||||
"encoder.layer.{bid}.output.LayerNorm", # bert
|
"encoder.layer.{bid}.output.LayerNorm", # bert
|
||||||
|
"language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon
|
||||||
),
|
),
|
||||||
|
|
||||||
# Feed-forward up
|
# Feed-forward up
|
||||||
|
@ -417,6 +446,7 @@ class TensorNameMap:
|
||||||
"layers.{bid}.feed_forward.w3", # llama-pth
|
"layers.{bid}.feed_forward.w3", # llama-pth
|
||||||
"encoder.layer.{bid}.intermediate.dense", # bert
|
"encoder.layer.{bid}.intermediate.dense", # bert
|
||||||
"transformer.h.{bid}.mlp.fc_in", # gpt-j
|
"transformer.h.{bid}.mlp.fc_in", # gpt-j
|
||||||
|
"language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon
|
||||||
),
|
),
|
||||||
|
|
||||||
# Feed-forward gate
|
# Feed-forward gate
|
||||||
|
@ -435,7 +465,20 @@ class TensorNameMap:
|
||||||
"layers.{bid}.feed_forward.w2", # llama-pth
|
"layers.{bid}.feed_forward.w2", # llama-pth
|
||||||
"encoder.layer.{bid}.output.dense", # bert
|
"encoder.layer.{bid}.output.dense", # bert
|
||||||
"transformer.h.{bid}.mlp.fc_out", # gpt-j
|
"transformer.h.{bid}.mlp.fc_out", # gpt-j
|
||||||
|
"language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon
|
||||||
),
|
),
|
||||||
|
|
||||||
|
MODEL_TENSOR.ATTN_Q_NORM: (
|
||||||
|
"language_model.encoder.layers.{bid}.self_attention.q_layernorm",
|
||||||
|
),
|
||||||
|
|
||||||
|
MODEL_TENSOR.ATTN_K_NORM: (
|
||||||
|
"language_model.encoder.layers.{bid}.self_attention.k_layernorm",
|
||||||
|
),
|
||||||
|
|
||||||
|
MODEL_TENSOR.ROPE_FREQS: (
|
||||||
|
"language_model.encoder.layers.{bid}.self_attention.rotary_emb.inv_freq", # persimmon
|
||||||
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
mapping: dict[str, tuple[MODEL_TENSOR, str]]
|
mapping: dict[str, tuple[MODEL_TENSOR, str]]
|
||||||
|
|
|
@ -1,6 +1,6 @@
 [tool.poetry]
 name = "gguf"
-version = "0.4.0"
+version = "0.4.4"
 description = "Write ML models in GGUF for GGML"
 authors = ["GGML <ggml@ggml.ai>"]
 packages = [
k_quants.h (10 changed lines)

@ -29,7 +29,7 @@
 // 2-bit quantization
 // weight is represented as x = a * q + b
-// 16 blocks of 16 elemenets each
+// 16 blocks of 16 elements each
 // Effectively 2.5625 bits per weight
 typedef struct {
     uint8_t scales[QK_K/16]; // scales and mins, quantized with 4 bits
@ -41,7 +41,7 @@ static_assert(sizeof(block_q2_K) == 2*sizeof(ggml_fp16_t) + QK_K/16 + QK_K/4, "w
 // 3-bit quantization
 // weight is represented as x = a * q
-// 16 blocks of 16 elemenets each
+// 16 blocks of 16 elements each
 // Effectively 3.4375 bits per weight
 #ifdef GGML_QKK_64
 typedef struct {
@ -62,7 +62,7 @@ static_assert(sizeof(block_q3_K) == sizeof(ggml_fp16_t) + QK_K / 4 + QK_K / 8 +
 #endif

 // 4-bit quantization
-// 16 blocks of 32 elements each
+// 8 blocks of 32 elements each
 // weight is represented as x = a * q + b
 // Effectively 4.5 bits per weight
 #ifdef GGML_QKK_64
@ -83,7 +83,7 @@ static_assert(sizeof(block_q4_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/
 #endif

 // 5-bit quantization
-// 16 blocks of 32 elements each
+// 8 blocks of 32 elements each
 // weight is represented as x = a * q + b
 // Effectively 5.5 bits per weight
 #ifdef GGML_QKK_64
@ -107,7 +107,7 @@ static_assert(sizeof(block_q5_K) == 2*sizeof(ggml_fp16_t) + K_SCALE_SIZE + QK_K/
 // 6-bit quantization
 // weight is represented as x = a * q
-// 16 blocks of 16 elemenets each
+// 16 blocks of 16 elements each
 // Effectively 6.5625 bits per weight
 typedef struct {
     uint8_t ql[QK_K/2];      // quants, lower 4 bits
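The "effectively N bits per weight" figures in these comments are just total super-block bytes times 8, divided by QK_K weights per super-block. A standalone C++ sketch of that arithmetic for Q4_K and Q6_K, assuming QK_K = 256, K_SCALE_SIZE = 12, and the non-GGML_QKK_64 block layouts referenced by the static_asserts above:

#include <cstdio>

int main() {
    const int QK_K         = 256; // weights per super-block (assumed default build)
    const int K_SCALE_SIZE = 12;  // bytes of packed scales/mins (assumption)
    const int FP16         = 2;   // bytes per ggml_fp16_t

    // block_q4_K: d + dmin + scales + 4-bit quants
    const int q4_K_bytes = 2*FP16 + K_SCALE_SIZE + QK_K/2;       // 144 bytes
    // block_q6_K: lower 4 bits + upper 2 bits + 8-bit scales + d
    const int q6_K_bytes = QK_K/2 + QK_K/4 + QK_K/16 + FP16;     // 210 bytes

    printf("Q4_K: %.4f bits/weight\n", 8.0 * q4_K_bytes / QK_K); // 4.5000
    printf("Q6_K: %.4f bits/weight\n", 8.0 * q6_K_bytes / QK_K); // 6.5625
    return 0;
}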
llama.cpp (618 changed lines)
@ -125,6 +125,27 @@ static void replace_all(std::string & s, const std::string & search, const std::
     }
     s = std::move(result);
 }

+static bool is_float_close(float a, float b, float abs_tol) {
+    // Check for non-negative tolerance
+    if (abs_tol < 0.0) {
+        throw std::invalid_argument("Tolerance must be non-negative");
+    }
+
+    // Exact equality check
+    if (a == b) {
+        return true;
+    }
+
+    // Check for infinities
+    if (std::isinf(a) || std::isinf(b)) {
+        return false;
+    }
+
+    // Regular comparison using the provided absolute tolerance
+    return std::fabs(b - a) <= abs_tol;
+}
+
 #ifdef GGML_USE_CPU_HBM
 #include <hbwmalloc.h>
 #endif
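A quick standalone illustration of how the new helper behaves (the inputs are made up for the example):

#include <cmath>
#include <cstdio>
#include <stdexcept>

static bool is_float_close(float a, float b, float abs_tol) {
    if (abs_tol < 0.0) throw std::invalid_argument("Tolerance must be non-negative");
    if (a == b) return true;
    if (std::isinf(a) || std::isinf(b)) return false;
    return std::fabs(b - a) <= abs_tol;
}

int main() {
    // Two epsilons that differ only by round-off compare equal under 1e-9 ...
    printf("%d\n", is_float_close(1e-5f, 1e-5f + 1e-10f, 1e-9f)); // 1
    // ... while a real difference does not.
    printf("%d\n", is_float_close(1e-5f, 2e-5f, 1e-9f));          // 0
    return 0;
}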
@ -165,6 +186,7 @@ enum llm_arch {
|
||||||
LLM_ARCH_GPTNEOX,
|
LLM_ARCH_GPTNEOX,
|
||||||
LLM_ARCH_MPT,
|
LLM_ARCH_MPT,
|
||||||
LLM_ARCH_STARCODER,
|
LLM_ARCH_STARCODER,
|
||||||
|
LLM_ARCH_PERSIMMON,
|
||||||
LLM_ARCH_REFACT,
|
LLM_ARCH_REFACT,
|
||||||
LLM_ARCH_UNKNOWN,
|
LLM_ARCH_UNKNOWN,
|
||||||
};
|
};
|
||||||
|
@ -178,6 +200,7 @@ static std::map<llm_arch, std::string> LLM_ARCH_NAMES = {
|
||||||
{ LLM_ARCH_MPT, "mpt" },
|
{ LLM_ARCH_MPT, "mpt" },
|
||||||
{ LLM_ARCH_BAICHUAN, "baichuan" },
|
{ LLM_ARCH_BAICHUAN, "baichuan" },
|
||||||
{ LLM_ARCH_STARCODER, "starcoder" },
|
{ LLM_ARCH_STARCODER, "starcoder" },
|
||||||
|
{ LLM_ARCH_PERSIMMON, "persimmon" },
|
||||||
{ LLM_ARCH_REFACT, "refact" },
|
{ LLM_ARCH_REFACT, "refact" },
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -297,6 +320,8 @@ enum llm_tensor {
|
||||||
LLM_TENSOR_FFN_DOWN,
|
LLM_TENSOR_FFN_DOWN,
|
||||||
LLM_TENSOR_FFN_UP,
|
LLM_TENSOR_FFN_UP,
|
||||||
LLM_TENSOR_FFN_NORM,
|
LLM_TENSOR_FFN_NORM,
|
||||||
|
LLM_TENSOR_ATTN_Q_NORM,
|
||||||
|
LLM_TENSOR_ATTN_K_NORM,
|
||||||
};
|
};
|
||||||
|
|
||||||
static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
|
static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
|
||||||
|
@ -378,6 +403,23 @@ static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES =
|
||||||
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
|
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
LLM_ARCH_PERSIMMON,
|
||||||
|
{
|
||||||
|
{ LLM_TENSOR_TOKEN_EMBD, "token_embd"},
|
||||||
|
{ LLM_TENSOR_OUTPUT_NORM, "output_norm"},
|
||||||
|
{ LLM_TENSOR_OUTPUT, "output"},
|
||||||
|
{ LLM_TENSOR_ATTN_NORM, "blk.%d.attn_norm"},
|
||||||
|
{ LLM_TENSOR_ATTN_QKV, "blk.%d.attn_qkv"},
|
||||||
|
{ LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output"},
|
||||||
|
{ LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm"},
|
||||||
|
{ LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm"},
|
||||||
|
{ LLM_TENSOR_FFN_NORM, "blk.%d.ffn_norm"},
|
||||||
|
{ LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down"},
|
||||||
|
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up"},
|
||||||
|
{ LLM_TENSOR_ATTN_ROT_EMBD, "blk.%d.attn_rot_embd"},
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
LLM_ARCH_MPT,
|
LLM_ARCH_MPT,
|
||||||
{
|
{
|
||||||
|
@ -938,6 +980,7 @@ enum e_model {
|
||||||
MODEL_1B,
|
MODEL_1B,
|
||||||
MODEL_3B,
|
MODEL_3B,
|
||||||
MODEL_7B,
|
MODEL_7B,
|
||||||
|
MODEL_8B,
|
||||||
MODEL_13B,
|
MODEL_13B,
|
||||||
MODEL_15B,
|
MODEL_15B,
|
||||||
MODEL_30B,
|
MODEL_30B,
|
||||||
|
@ -969,7 +1012,24 @@ struct llama_hparams {
     float rope_freq_scale_train;

     bool operator!=(const llama_hparams & other) const {
-        return static_cast<bool>(memcmp(this, &other, sizeof(llama_hparams))); // NOLINT
+        if (this->vocab_only  != other.vocab_only)  return true;
+        if (this->n_vocab     != other.n_vocab)     return true;
+        if (this->n_ctx_train != other.n_ctx_train) return true;
+        if (this->n_embd      != other.n_embd)      return true;
+        if (this->n_head      != other.n_head)      return true;
+        if (this->n_head_kv   != other.n_head_kv)   return true;
+        if (this->n_layer     != other.n_layer)     return true;
+        if (this->n_rot       != other.n_rot)       return true;
+        if (this->n_ff        != other.n_ff)        return true;
+
+        const float EPSILON = 1e-9;
+
+        if (!is_float_close(this->f_norm_eps,            other.f_norm_eps,            EPSILON)) return true;
+        if (!is_float_close(this->f_norm_rms_eps,        other.f_norm_rms_eps,        EPSILON)) return true;
+        if (!is_float_close(this->rope_freq_base_train,  other.rope_freq_base_train,  EPSILON)) return true;
+        if (!is_float_close(this->rope_freq_scale_train, other.rope_freq_scale_train, EPSILON)) return true;
+
+        return false;
     }

     uint32_t n_gqa() const {
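The motivation for spelling the comparison out field by field instead of memcmp: memcmp also compares padding bytes and exact float bit patterns, so logically equal hparams can look different. A self-contained sketch of the padding pitfall, using a hypothetical two-field struct for illustration:

#include <cstring>
#include <cstdio>

struct hp {
    bool  vocab_only;   // padding bytes typically follow this member
    float f_norm_eps;
};

int main() {
    hp a, b;
    std::memset(&a, 0xAA, sizeof(a)); // garbage in the padding bytes
    std::memset(&b, 0x55, sizeof(b));
    a.vocab_only = b.vocab_only = false;
    a.f_norm_eps = b.f_norm_eps = 1e-5f;

    // Field-wise the structs are equal, but memcmp sees the differing padding.
    printf("memcmp != 0: %d\n", std::memcmp(&a, &b, sizeof(a)) != 0); // typically 1
    return 0;
}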
@ -1003,6 +1063,10 @@ struct llama_layer {
|
||||||
struct ggml_tensor * attn_norm_b;
|
struct ggml_tensor * attn_norm_b;
|
||||||
struct ggml_tensor * attn_norm_2;
|
struct ggml_tensor * attn_norm_2;
|
||||||
struct ggml_tensor * attn_norm_2_b;
|
struct ggml_tensor * attn_norm_2_b;
|
||||||
|
struct ggml_tensor * attn_q_norm;
|
||||||
|
struct ggml_tensor * attn_q_norm_b;
|
||||||
|
struct ggml_tensor * attn_k_norm;
|
||||||
|
struct ggml_tensor * attn_k_norm_b;
|
||||||
|
|
||||||
// attention
|
// attention
|
||||||
struct ggml_tensor * wq;
|
struct ggml_tensor * wq;
|
||||||
|
@ -1044,6 +1108,9 @@ struct llama_kv_cell {
 struct llama_kv_cache {
     bool has_shift = false;

+    // Note: The value of head isn't only used to optimize searching
+    // for a free KV slot. llama_decode_internal also uses it, so it
+    // cannot be freely changed after a slot has been allocated.
     uint32_t head = 0;
     uint32_t size = 0;

@ -1301,6 +1368,8 @@ static bool llama_kv_cache_init(

 // find an empty slot of size "n_tokens" in the cache
 // updates the cache head
+// Note: On success, it's important that cache.head points
+// to the first cell of the slot.
 static bool llama_kv_cache_find_slot(
     struct llama_kv_cache & cache,
     const struct llama_batch & batch) {
@ -1316,8 +1385,8 @@ static bool llama_kv_cache_find_slot(

     while (true) {
         if (cache.head + n_tokens > n_ctx) {
-            cache.head = 0;
             n_tested += n_ctx - cache.head;
+            cache.head = 0;
             continue;
         }
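The reordering in this hunk is a correctness fix, not cosmetics: the cells skipped at the end of the buffer have to be counted before head is reset, otherwise n_ctx - 0 gets added to the search counter. A standalone sketch with made-up sizes:

#include <cstdint>
#include <cstdio>

int main() {
    const uint32_t n_ctx = 512, n_tokens = 16;
    uint32_t head = 504, n_tested = 0;

    // Fixed order: count the skipped tail cells first, then wrap around.
    if (head + n_tokens > n_ctx) {
        n_tested += n_ctx - head; // += 8 cells skipped at the end of the cache
        head = 0;
    }
    printf("n_tested = %u\n", n_tested); // 8; the old order would have added 512
    return 0;
}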
@ -1368,6 +1437,9 @@ static void llama_kv_cache_tokens_rm(struct llama_kv_cache & cache, int32_t c0,
|
||||||
cache.cells[i].pos = -1;
|
cache.cells[i].pos = -1;
|
||||||
cache.cells[i].seq_id.clear();
|
cache.cells[i].seq_id.clear();
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Searching for a free slot can start here since we know it will be empty.
|
||||||
|
cache.head = uint32_t(c0);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void llama_kv_cache_seq_rm(
|
static void llama_kv_cache_seq_rm(
|
||||||
|
@ -1375,6 +1447,8 @@ static void llama_kv_cache_seq_rm(
|
||||||
llama_seq_id seq_id,
|
llama_seq_id seq_id,
|
||||||
llama_pos p0,
|
llama_pos p0,
|
||||||
llama_pos p1) {
|
llama_pos p1) {
|
||||||
|
uint32_t new_head = cache.size;
|
||||||
|
|
||||||
if (p0 < 0) p0 = 0;
|
if (p0 < 0) p0 = 0;
|
||||||
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
||||||
|
|
||||||
|
@ -1383,9 +1457,13 @@ static void llama_kv_cache_seq_rm(
|
||||||
cache.cells[i].seq_id.erase(seq_id);
|
cache.cells[i].seq_id.erase(seq_id);
|
||||||
if (cache.cells[i].seq_id.empty()) {
|
if (cache.cells[i].seq_id.empty()) {
|
||||||
cache.cells[i].pos = -1;
|
cache.cells[i].pos = -1;
|
||||||
|
if (new_head == cache.size) new_head = i;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If we freed up a slot, set head to it so searching can start there.
|
||||||
|
if (new_head != cache.size) cache.head = new_head;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void llama_kv_cache_seq_cp(
|
static void llama_kv_cache_seq_cp(
|
||||||
|
@ -1397,6 +1475,8 @@ static void llama_kv_cache_seq_cp(
|
||||||
if (p0 < 0) p0 = 0;
|
if (p0 < 0) p0 = 0;
|
||||||
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
||||||
|
|
||||||
|
cache.head = 0;
|
||||||
|
|
||||||
for (uint32_t i = 0; i < cache.size; ++i) {
|
for (uint32_t i = 0; i < cache.size; ++i) {
|
||||||
if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
|
if (cache.cells[i].has_seq_id(seq_id_src) && cache.cells[i].pos >= p0 && cache.cells[i].pos < p1) {
|
||||||
cache.cells[i].seq_id.insert(seq_id_dst);
|
cache.cells[i].seq_id.insert(seq_id_dst);
|
||||||
|
@ -1405,12 +1485,18 @@ static void llama_kv_cache_seq_cp(
|
||||||
}
|
}
|
||||||
|
|
||||||
static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
|
static void llama_kv_cache_seq_keep(struct llama_kv_cache & cache, llama_seq_id seq_id) {
|
||||||
|
uint32_t new_head = cache.size;
|
||||||
|
|
||||||
for (uint32_t i = 0; i < cache.size; ++i) {
|
for (uint32_t i = 0; i < cache.size; ++i) {
|
||||||
if (!cache.cells[i].has_seq_id(seq_id)) {
|
if (!cache.cells[i].has_seq_id(seq_id)) {
|
||||||
cache.cells[i].pos = -1;
|
cache.cells[i].pos = -1;
|
||||||
cache.cells[i].seq_id.clear();
|
cache.cells[i].seq_id.clear();
|
||||||
|
if (new_head == cache.size) new_head = i;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If we freed up a slot, set head to it so searching can start there.
|
||||||
|
if (new_head != cache.size) cache.head = new_head;
|
||||||
}
|
}
|
||||||
|
|
||||||
static void llama_kv_cache_seq_shift(
|
static void llama_kv_cache_seq_shift(
|
||||||
|
@ -1419,6 +1505,8 @@ static void llama_kv_cache_seq_shift(
|
||||||
llama_pos p0,
|
llama_pos p0,
|
||||||
llama_pos p1,
|
llama_pos p1,
|
||||||
llama_pos delta) {
|
llama_pos delta) {
|
||||||
|
uint32_t new_head = cache.size;
|
||||||
|
|
||||||
if (p0 < 0) p0 = 0;
|
if (p0 < 0) p0 = 0;
|
||||||
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
if (p1 < 0) p1 = std::numeric_limits<llama_pos>::max();
|
||||||
|
|
||||||
|
@ -1428,12 +1516,17 @@ static void llama_kv_cache_seq_shift(
|
||||||
if (cache.cells[i].pos < 0) {
|
if (cache.cells[i].pos < 0) {
|
||||||
cache.cells[i].pos = -1;
|
cache.cells[i].pos = -1;
|
||||||
cache.cells[i].seq_id.clear();
|
cache.cells[i].seq_id.clear();
|
||||||
|
if (new_head == cache.size) new_head = i;
|
||||||
} else {
|
} else {
|
||||||
cache.has_shift = true;
|
cache.has_shift = true;
|
||||||
cache.cells[i].delta = delta;
|
cache.cells[i].delta = delta;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// If we freed up a slot, set head to it so searching can start there.
|
||||||
|
// Otherwise we just start the next search from the beginning.
|
||||||
|
cache.head = new_head != cache.size ? new_head : 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
//
|
//
|
||||||
|
@ -1834,6 +1927,7 @@ static const char * llama_model_type_name(e_model type) {
|
||||||
case MODEL_1B: return "1B";
|
case MODEL_1B: return "1B";
|
||||||
case MODEL_3B: return "3B";
|
case MODEL_3B: return "3B";
|
||||||
case MODEL_7B: return "7B";
|
case MODEL_7B: return "7B";
|
||||||
|
case MODEL_8B: return "8B";
|
||||||
case MODEL_13B: return "13B";
|
case MODEL_13B: return "13B";
|
||||||
case MODEL_15B: return "15B";
|
case MODEL_15B: return "15B";
|
||||||
case MODEL_30B: return "30B";
|
case MODEL_30B: return "30B";
|
||||||
|
@ -1946,6 +2040,14 @@ static void llm_load_hparams(
|
||||||
default: model.type = e_model::MODEL_UNKNOWN;
|
default: model.type = e_model::MODEL_UNKNOWN;
|
||||||
}
|
}
|
||||||
} break;
|
} break;
|
||||||
|
case LLM_ARCH_PERSIMMON:
|
||||||
|
{
|
||||||
|
GGUF_GET_KEY(ctx, hparams.f_norm_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_EPS));
|
||||||
|
switch (hparams.n_layer) {
|
||||||
|
case 36: model.type = e_model::MODEL_8B; break;
|
||||||
|
default: model.type = e_model::MODEL_UNKNOWN;
|
||||||
|
}
|
||||||
|
} break;
|
||||||
case LLM_ARCH_REFACT:
|
case LLM_ARCH_REFACT:
|
||||||
{
|
{
|
||||||
GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
|
GGUF_GET_KEY(ctx, hparams.f_norm_rms_eps, gguf_get_val_f32, GGUF_TYPE_FLOAT32, true, kv(LLM_KV_ATTENTION_LAYERNORM_RMS_EPS));
|
||||||
|
@ -2482,6 +2584,67 @@ static void llm_load_tensors(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
} break;
|
} break;
|
||||||
|
case LLM_ARCH_PERSIMMON:
|
||||||
|
{
|
||||||
|
model.tok_embeddings = ml.create_tensor(ctx, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, GGML_BACKEND_CPU);
|
||||||
|
|
||||||
|
{
|
||||||
|
ggml_backend backend_norm;
|
||||||
|
ggml_backend backend_output;
|
||||||
|
|
||||||
|
if (n_gpu_layers > int(n_layer)) {
|
||||||
|
// norm is not performance relevant on its own but keeping it in VRAM reduces data copying
|
||||||
|
// on Windows however this is detrimental unless everything is on the GPU
|
||||||
|
#ifndef _WIN32
|
||||||
|
backend_norm = LLAMA_BACKEND_OFFLOAD;
|
||||||
|
#else
|
||||||
|
backend_norm = n_gpu_layers <= (int) n_layer + 2 ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
|
||||||
|
#endif // _WIN32
|
||||||
|
|
||||||
|
backend_output = LLAMA_BACKEND_OFFLOAD_SPLIT;
|
||||||
|
} else {
|
||||||
|
backend_norm = GGML_BACKEND_CPU;
|
||||||
|
backend_output = GGML_BACKEND_CPU;
|
||||||
|
}
|
||||||
|
|
||||||
|
model.output_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd}, backend_norm);
|
||||||
|
model.output_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT_NORM, "bias"), {n_embd}, backend_norm);
|
||||||
|
model.output = ml.create_tensor(ctx, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, backend_output);
|
||||||
|
|
||||||
|
if (backend_norm == GGML_BACKEND_GPU) {
|
||||||
|
vram_weights += ggml_nbytes(model.output_norm);
|
||||||
|
vram_weights += ggml_nbytes(model.output_norm_b);
|
||||||
|
}
|
||||||
|
if (backend_output == GGML_BACKEND_GPU_SPLIT) {
|
||||||
|
vram_weights += ggml_nbytes(model.output);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const uint32_t n_ff = hparams.n_ff;
|
||||||
|
const int i_gpu_start = n_layer - n_gpu_layers;
|
||||||
|
model.layers.resize(n_layer);
|
||||||
|
for (uint32_t i = 0; i < n_layer; ++i) {
|
||||||
|
const ggml_backend backend = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD;
|
||||||
|
const ggml_backend backend_split = int(i) < i_gpu_start ? GGML_BACKEND_CPU : LLAMA_BACKEND_OFFLOAD_SPLIT;
|
||||||
|
auto & layer = model.layers[i];
|
||||||
|
layer.attn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "weight", i), {n_embd}, backend);
|
||||||
|
layer.attn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_NORM, "bias", i), {n_embd}, backend);
|
||||||
|
layer.wqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "weight", i), {n_embd, n_embd + 2*n_embd_gqa}, backend_split);
|
||||||
|
layer.bqkv = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_QKV, "bias", i), {n_embd + 2*n_embd_gqa}, backend_split);
|
||||||
|
layer.wo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}, backend_split);
|
||||||
|
layer.bo = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_OUT, "bias", i), {n_embd}, backend_split);
|
||||||
|
layer.w2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd}, backend_split);
|
||||||
|
layer.b2 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_DOWN, "bias", i), {n_embd}, backend_split);
|
||||||
|
layer.w3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "weight", i), {n_embd, n_ff}, backend_split);
|
||||||
|
layer.b3 = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_UP, "bias", i), {n_ff}, backend_split);
|
||||||
|
layer.ffn_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "weight", i), {n_embd}, backend);
|
||||||
|
layer.ffn_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_FFN_NORM, "bias", i), {n_embd}, backend);
|
||||||
|
layer.attn_q_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {64}, backend);
|
||||||
|
layer.attn_q_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i), {64}, backend);
|
||||||
|
layer.attn_k_norm = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {64}, backend);
|
||||||
|
layer.attn_k_norm_b = ml.create_tensor(ctx, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i), {64}, backend);
|
||||||
|
}
|
||||||
|
} break;
|
||||||
default:
|
default:
|
||||||
throw std::runtime_error("unknown architecture");
|
throw std::runtime_error("unknown architecture");
|
||||||
}
|
}
|
||||||
|
@ -2630,11 +2793,9 @@ static struct ggml_cgraph * llm_build_llama(
|
||||||
struct ggml_init_params params = {
|
struct ggml_init_params params = {
|
||||||
/*.mem_size =*/ buf_compute.size,
|
/*.mem_size =*/ buf_compute.size,
|
||||||
/*.mem_buffer =*/ buf_compute.data,
|
/*.mem_buffer =*/ buf_compute.data,
|
||||||
/*.no_alloc =*/ false,
|
/*.no_alloc =*/ true,
|
||||||
};
|
};
|
||||||
|
|
||||||
params.no_alloc = true;
|
|
||||||
|
|
||||||
struct ggml_context * ctx0 = ggml_init(params);
|
struct ggml_context * ctx0 = ggml_init(params);
|
||||||
|
|
||||||
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||||
|
@ -3018,11 +3179,9 @@ static struct ggml_cgraph * llm_build_baichaun(
|
||||||
struct ggml_init_params params = {
|
struct ggml_init_params params = {
|
||||||
/*.mem_size =*/ buf_compute.size,
|
/*.mem_size =*/ buf_compute.size,
|
||||||
/*.mem_buffer =*/ buf_compute.data,
|
/*.mem_buffer =*/ buf_compute.data,
|
||||||
/*.no_alloc =*/ false,
|
/*.no_alloc =*/ true,
|
||||||
};
|
};
|
||||||
|
|
||||||
params.no_alloc = true;
|
|
||||||
|
|
||||||
struct ggml_context * ctx0 = ggml_init(params);
|
struct ggml_context * ctx0 = ggml_init(params);
|
||||||
|
|
||||||
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||||
|
@ -3419,11 +3578,9 @@ static struct ggml_cgraph * llm_build_refact(
|
||||||
struct ggml_init_params params = {
|
struct ggml_init_params params = {
|
||||||
/*.mem_size =*/ buf_compute.size,
|
/*.mem_size =*/ buf_compute.size,
|
||||||
/*.mem_buffer =*/ buf_compute.data,
|
/*.mem_buffer =*/ buf_compute.data,
|
||||||
/*.no_alloc =*/ false,
|
/*.no_alloc =*/ true,
|
||||||
};
|
};
|
||||||
|
|
||||||
params.no_alloc = true;
|
|
||||||
|
|
||||||
struct ggml_context * ctx0 = ggml_init(params);
|
struct ggml_context * ctx0 = ggml_init(params);
|
||||||
|
|
||||||
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||||
|
@ -3773,11 +3930,9 @@ static struct ggml_cgraph * llm_build_falcon(
|
||||||
struct ggml_init_params params = {
|
struct ggml_init_params params = {
|
||||||
/*.mem_size =*/ buf_compute.size,
|
/*.mem_size =*/ buf_compute.size,
|
||||||
/*.mem_buffer =*/ buf_compute.data,
|
/*.mem_buffer =*/ buf_compute.data,
|
||||||
/*.no_alloc =*/ false,
|
/*.no_alloc =*/ true,
|
||||||
};
|
};
|
||||||
|
|
||||||
params.no_alloc = true;
|
|
||||||
|
|
||||||
struct ggml_context * ctx0 = ggml_init(params);
|
struct ggml_context * ctx0 = ggml_init(params);
|
||||||
|
|
||||||
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||||
|
@ -4133,11 +4288,9 @@ static struct ggml_cgraph * llm_build_starcoder(
|
||||||
struct ggml_init_params params = {
|
struct ggml_init_params params = {
|
||||||
/*.mem_size =*/ buf_compute.size,
|
/*.mem_size =*/ buf_compute.size,
|
||||||
/*.mem_buffer =*/ buf_compute.data,
|
/*.mem_buffer =*/ buf_compute.data,
|
||||||
/*.no_alloc =*/ false,
|
/*.no_alloc =*/ true,
|
||||||
};
|
};
|
||||||
|
|
||||||
params.no_alloc = true;
|
|
||||||
|
|
||||||
struct ggml_context * ctx0 = ggml_init(params);
|
struct ggml_context * ctx0 = ggml_init(params);
|
||||||
|
|
||||||
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||||
|
@@ -4348,6 +4501,404 @@ static struct ggml_cgraph * llm_build_starcoder(
     return gf;
 }
 
+static struct ggml_cgraph * llm_build_persimmon(
+         llama_context & lctx,
+    const llama_batch & batch) {
+    const auto & model = lctx.model;
+    const auto & hparams = model.hparams;
+
+    const auto & kv_self = lctx.kv_self;
+
+    GGML_ASSERT(!!kv_self.ctx);
+
+    const auto & cparams = lctx.cparams;
+    const int64_t n_embd = hparams.n_embd;
+    const int64_t n_layer = hparams.n_layer;
+    const int64_t n_ctx = cparams.n_ctx;
+    const int64_t n_head_kv = hparams.n_head_kv;
+    const int64_t n_head = hparams.n_head;
+    const int64_t n_embd_head = hparams.n_embd_head();
+    const int64_t n_embd_gqa = hparams.n_embd_gqa();
+    const size_t n_rot = n_embd_head / 2;
+
+    const float freq_base = cparams.rope_freq_base;
+    const float freq_scale = cparams.rope_freq_scale;
+    const float norm_eps = hparams.f_norm_eps;
+
+    const int n_gpu_layers = model.n_gpu_layers;
+
+    const int32_t n_tokens = batch.n_tokens;
+    const int32_t n_kv     = ggml_allocr_is_measure(lctx.alloc) ? n_ctx : kv_self.n;
+    const int32_t kv_head  = ggml_allocr_is_measure(lctx.alloc) ? n_ctx - n_tokens : kv_self.head;
+
+    const bool do_rope_shift = ggml_allocr_is_measure(lctx.alloc) || kv_self.has_shift;
+
+    auto & buf_compute = lctx.buf_compute;
+    struct ggml_init_params params = {
+        /*.mem_size =*/ buf_compute.size,
+        /*.mem_buffer =*/ buf_compute.data,
+        /*.no_alloc =*/ true,
+    };
+
+    struct ggml_context * ctx0 = ggml_init(params);
+
+    ggml_cgraph * gf = ggml_new_graph(ctx0);
+
+    struct ggml_tensor * cur;
+    struct ggml_tensor * inpL;
+
+    if (batch.token) {
+        struct ggml_tensor * inp_tokens = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+
+        ggml_allocr_alloc(lctx.alloc, inp_tokens);
+        if (!ggml_allocr_is_measure(lctx.alloc)) {
+            memcpy(inp_tokens->data, batch.token, n_tokens*ggml_element_size(inp_tokens));
+        }
+        ggml_set_name(inp_tokens, "inp_tokens");
+        inpL = ggml_get_rows(ctx0, model.tok_embeddings, inp_tokens);
+    } else {
+        inpL = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_embd, n_tokens);
+        ggml_allocr_alloc(lctx.alloc, inpL);
+        if (!ggml_allocr_is_measure(lctx.alloc)) {
+            memcpy(inpL->data, batch.embd, n_tokens * n_embd * ggml_element_size(inpL));
+        }
+    }
+    const int i_gpu_start = n_layer - n_gpu_layers;
+    (void) i_gpu_start;
+    offload_func_t offload_func_nr = llama_nop; // nr = non-repeating
+    offload_func_t offload_func_kq = llama_nop;
+    offload_func_t offload_func_v  = llama_nop;
+    // KQ_scale
+    struct ggml_tensor * KQ_scale = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, 1);
+    ggml_allocr_alloc(lctx.alloc, KQ_scale);
+    if (!ggml_allocr_is_measure(lctx.alloc)) {
+        ggml_set_f32(KQ_scale, 1.0f/sqrtf(float(n_embd_head)));
+    }
+    ggml_set_name(KQ_scale, "1/sqrt(n_embd_head)");
+    struct ggml_tensor * KQ_mask = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_kv, n_tokens, 1);
+    offload_func_kq(KQ_mask);
+    ggml_set_name(KQ_mask, "KQ_mask");
+    ggml_allocr_alloc(lctx.alloc, KQ_mask);
+
+    if (!ggml_allocr_is_measure(lctx.alloc)) {
+        float * data = (float *) KQ_mask->data;
+        memset(data, 0, ggml_nbytes(KQ_mask));
+        for (int h = 0; h < 1; ++h) {
+            for (int j = 0; j < n_tokens; ++j) {
+                const llama_pos    pos    = batch.pos[j];
+                const llama_seq_id seq_id = batch.seq_id[j];
+                for (int i = 0; i < n_kv; ++i) {
+                    if (!kv_self.cells[i].has_seq_id(seq_id) || kv_self.cells[i].pos > pos) {
+                        data[h*(n_kv*n_tokens) + j*n_kv + i] = -INFINITY;
+                    }
+                }
+            }
+        }
+    }
+
+    struct ggml_tensor * KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+    offload_func_kq(KQ_pos);
+    ggml_set_name(KQ_pos, "KQ_pos");
+    ggml_allocr_alloc(lctx.alloc, KQ_pos);
+    if (!ggml_allocr_is_measure(lctx.alloc)) {
+        int * data = (int *) KQ_pos->data;
+        for (int i = 0; i < n_tokens; ++i) {
+            data[i] = batch.pos[i];
+        }
+    }
+    if (do_rope_shift) {
+        struct ggml_tensor * K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx);
+        offload_func_kq(K_shift);
+        ggml_set_name(K_shift, "K_shift");
+        ggml_allocr_alloc(lctx.alloc, K_shift);
+        if (!ggml_allocr_is_measure(lctx.alloc)) {
+            int * data = (int *) K_shift->data;
+            for (int i = 0; i < n_ctx; ++i) {
+                data[i] = kv_self.cells[i].delta;
+            }
+        }
+        for (int il = 0; il < n_layer; ++il) {
+            struct ggml_tensor * tmp =
+                    // we rotate only the first n_rot dimensions.
+                    ggml_rope_custom_inplace(ctx0,
+                        ggml_view_3d(ctx0, kv_self.k,
+                            n_rot, n_head, n_ctx,
+                            ggml_element_size(kv_self.k)*n_embd_gqa,
+                            ggml_element_size(kv_self.k)*n_embd_head,
+                            ggml_element_size(kv_self.k)*(n_embd_head*n_ctx*il)
+                        ),
+                        K_shift, n_rot, 2, 0, freq_base, freq_scale);
+            offload_func_kq(tmp);
+            ggml_build_forward_expand(gf, tmp);
+        }
+    }
+    for (int il=0; il < n_layer; ++il) {
+        struct ggml_tensor * residual = inpL;
+        offload_func_t offload_func = llama_nop;
+        {
+            cur = ggml_norm(ctx0, inpL, norm_eps);
+            offload_func(cur);
+            cur = ggml_mul(ctx0, cur, model.layers[il].attn_norm);
+            offload_func(cur);
+            cur = ggml_add(ctx0, cur, model.layers[il].attn_norm_b);
+            offload_func(cur);
+            ggml_format_name(cur, "input_layernorm_%d", il);
+        }
+        // self attention
+        {
+            cur = ggml_mul_mat(ctx0, model.layers[il].wqkv, cur);
+            offload_func_kq(cur);
+            cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
+            offload_func_kq(cur);
+
+            // split qkv
+            GGML_ASSERT(n_head_kv == n_head);
+            ggml_set_name(cur, format("qkv_%d", il).c_str());
+            struct ggml_tensor * tmpqkv = ggml_reshape_4d(ctx0, cur, n_embd_head, 3, n_head, n_tokens);
+            offload_func_kq(tmpqkv);
+            struct ggml_tensor * tmpqkv_perm = ggml_cont(ctx0, ggml_permute(ctx0, tmpqkv, 0, 3, 1, 2));
+            offload_func_kq(tmpqkv_perm);
+            ggml_format_name(tmpqkv_perm, "tmpqkv_perm_%d", il);
+            struct ggml_tensor * tmpq = ggml_view_3d(
+                ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
+                ggml_element_size(tmpqkv_perm) * n_embd_head,
+                ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
+                0
+            );
+            offload_func_kq(tmpq);
+            struct ggml_tensor * tmpk = ggml_view_3d(
+                ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
+                ggml_element_size(tmpqkv_perm) * n_embd_head,
+                ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
+                ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens
+            );
+            offload_func_kq(tmpk);
+            // Q/K Layernorm
+            tmpq = ggml_norm(ctx0, tmpq, norm_eps);
+            offload_func_kq(tmpq);
+            tmpq = ggml_mul(ctx0, tmpq, model.layers[il].attn_q_norm);
+            offload_func_kq(tmpq);
+            tmpq = ggml_add(ctx0, tmpq, model.layers[il].attn_q_norm_b);
+            offload_func_kq(tmpq);
+
+            tmpk = ggml_norm(ctx0, tmpk, norm_eps);
+            offload_func_v(tmpk);
+            tmpk = ggml_mul(ctx0, tmpk, model.layers[il].attn_k_norm);
+            offload_func_v(tmpk);
+            tmpk = ggml_add(ctx0, tmpk, model.layers[il].attn_k_norm_b);
+            offload_func_v(tmpk);
+
+            // RoPE the first n_rot of q/k, pass the other half, and concat.
+            struct ggml_tensor * qrot = ggml_view_3d(
+                ctx0, tmpq, n_rot, n_head, n_tokens,
+                ggml_element_size(tmpq) * n_embd_head,
+                ggml_element_size(tmpq) * n_embd_head * n_head,
+                0
+            );
+            offload_func_kq(qrot);
+            ggml_format_name(qrot, "qrot_%d", il);
+            struct ggml_tensor * krot = ggml_view_3d(
+                ctx0, tmpk, n_rot, n_head, n_tokens,
+                ggml_element_size(tmpk) * n_embd_head,
+                ggml_element_size(tmpk) * n_embd_head * n_head,
+                0
+            );
+            offload_func_kq(krot);
+            ggml_format_name(krot, "krot_%d", il);
+
+            // get the second half of tmpq, e.g tmpq[n_rot:, :, :]
+            struct ggml_tensor * qpass = ggml_view_3d(
+                ctx0, tmpq, n_rot, n_head, n_tokens,
+                ggml_element_size(tmpq) * n_embd_head,
+                ggml_element_size(tmpq) * n_embd_head * n_head,
+                ggml_element_size(tmpq) * n_rot
+            );
+            offload_func_kq(qpass);
+            ggml_format_name(qpass, "qpass_%d", il);
+            struct ggml_tensor * kpass = ggml_view_3d(
+                ctx0, tmpk, n_rot, n_head, n_tokens,
+                ggml_element_size(tmpk) * n_embd_head,
+                ggml_element_size(tmpk) * n_embd_head * n_head,
+                ggml_element_size(tmpk) * n_rot
+            );
+            offload_func_kq(kpass);
+            ggml_format_name(kpass, "kpass_%d", il);
+
+            struct ggml_tensor * qrotated = ggml_rope_custom(
+                ctx0, qrot, KQ_pos, n_rot, 2, 0, freq_base, freq_scale
+            );
+            offload_func_kq(qrotated);
+            struct ggml_tensor * krotated = ggml_rope_custom(
+                ctx0, krot, KQ_pos, n_rot, 2, 0, freq_base, freq_scale
+            );
+            offload_func_kq(krotated);
+            // ggml currently only supports concatenation on dim=2
+            // so we need to permute qrot, qpass, concat, then permute back.
+            qrotated = ggml_cont(ctx0, ggml_permute(ctx0, qrotated, 2, 1, 0, 3));
+            offload_func_kq(qrotated);
+            krotated = ggml_cont(ctx0, ggml_permute(ctx0, krotated, 2, 1, 0, 3));
+            offload_func_kq(krotated);
+
+            qpass = ggml_cont(ctx0, ggml_permute(ctx0, qpass, 2, 1, 0, 3));
+            offload_func_kq(qpass);
+            kpass = ggml_cont(ctx0, ggml_permute(ctx0, kpass, 2, 1, 0, 3));
+            offload_func_kq(kpass);
+
+            struct ggml_tensor * Qcur = ggml_concat(ctx0, qrotated, qpass);
+            offload_func_kq(Qcur);
+            struct ggml_tensor * Kcur = ggml_concat(ctx0, krotated, kpass);
+            offload_func_kq(Kcur);
+
+            struct ggml_tensor * Q = ggml_cont(ctx0, ggml_permute(ctx0, Qcur, 1, 2, 0, 3));
+            offload_func_kq(Q);
+
+            Kcur = ggml_cont(ctx0, ggml_permute(ctx0, Kcur, 2, 1, 0, 3));
+            offload_func_kq(Kcur);
+            {
+                struct ggml_tensor * tmpv = ggml_view_3d(
+                    ctx0, tmpqkv_perm, n_embd_head, n_head, n_tokens,
+                    ggml_element_size(tmpqkv_perm) * n_embd_head,
+                    ggml_element_size(tmpqkv_perm) * n_embd_head * n_head,
+                    ggml_element_size(tmpqkv_perm) * n_embd_head * n_head * n_tokens * 2
+                );
+                offload_func_v(tmpv);
+                // store K, V in cache
+                struct ggml_tensor * Vcur = ggml_transpose(ctx0, ggml_reshape_2d(ctx0, tmpv, n_embd_gqa, n_tokens));
+                offload_func_v(Vcur);
+                ggml_set_name(Vcur, "Vcur");
+
+                struct ggml_tensor * k = ggml_view_1d(
+                    ctx0, kv_self.k, n_tokens*n_embd_gqa,
+                    (ggml_element_size(kv_self.k)*n_embd_gqa)*(il*n_ctx + kv_head)
+                );
+                offload_func_kq(k);
+                ggml_set_name(k, "k");
+
+                struct ggml_tensor * v = ggml_view_2d(ctx0, kv_self.v, n_tokens, n_embd_gqa,
+                        (   n_ctx)*ggml_element_size(kv_self.v),
+                        (il*n_ctx)*ggml_element_size(kv_self.v)*n_embd_gqa + kv_head*ggml_element_size(kv_self.v));
+                offload_func_v(v);
+                ggml_set_name(v, "v");
+
+                // important: storing RoPE-ed version of K in the KV cache!
+                ggml_build_forward_expand(gf, ggml_cpy(ctx0, Kcur, k));
+                ggml_build_forward_expand(gf, ggml_cpy(ctx0, Vcur, v));
+            }
+            struct ggml_tensor * K = ggml_view_3d(ctx0, kv_self.k,
+                    n_embd_head, n_kv, n_head_kv,
+                    ggml_element_size(kv_self.k)*n_embd_gqa,
+                    ggml_element_size(kv_self.k)*n_embd_head,
+                    ggml_element_size(kv_self.k)*n_embd_gqa*n_ctx*il);
+
+            offload_func_kq(K);
+            ggml_format_name(K, "K_%d", il);
+
+            struct ggml_tensor * KQ = ggml_mul_mat(ctx0, K, Q);
+            offload_func_kq(KQ);
+            ggml_set_name(KQ, "KQ");
+
+            struct ggml_tensor * KQ_scaled = ggml_scale(ctx0, KQ, KQ_scale);
+            offload_func_kq(KQ_scaled);
+            ggml_set_name(KQ_scaled, "KQ_scaled");
+
+            struct ggml_tensor * KQ_masked = ggml_add(ctx0, KQ_scaled, KQ_mask);
+            offload_func_kq(KQ_masked);
+            ggml_set_name(KQ_masked, "KQ_masked");
+
+            struct ggml_tensor * KQ_soft_max = ggml_soft_max_inplace(ctx0, KQ_masked);
+            offload_func_kq(KQ_soft_max);
+            ggml_set_name(KQ_soft_max, "KQ_soft_max");
+
+            struct ggml_tensor * V =
+                ggml_view_3d(ctx0, kv_self.v,
+                    n_kv, n_embd_head, n_head_kv,
+                    ggml_element_size(kv_self.v)*n_ctx,
+                    ggml_element_size(kv_self.v)*n_ctx*n_embd_head,
+                    ggml_element_size(kv_self.v)*n_ctx*n_embd_gqa*il);
+            offload_func_v(V);
+            ggml_set_name(V, "V");
+
+            struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V, KQ_soft_max);
+            offload_func_v(KQV);
+            ggml_set_name(KQV, "KQV");
+
+            struct ggml_tensor * KQV_merged = ggml_permute(ctx0, KQV, 0, 2, 1, 3);
+            offload_func_v(KQV_merged);
+            ggml_set_name(KQV_merged, "KQV_merged");
+
+            cur = ggml_cont_2d(ctx0, KQV_merged, n_embd, n_tokens);
+            offload_func_v(cur);
+            ggml_set_name(cur, "KQV_merged_contiguous");
+
+            cur = ggml_mul_mat(ctx0, model.layers[il].wo, cur);
+            offload_func(cur);
+            cur = ggml_add(ctx0, cur, model.layers[il].bo);
+            offload_func(cur);
+            ggml_set_name(cur, "result_wo");
+        }
+
+        struct ggml_tensor * inpFF = ggml_add(ctx0, residual, cur);
+        offload_func(inpFF);
+        ggml_set_name(inpFF, "inpFF");
+        {
+            // MLP
+            {
+                // Norm
+                cur = ggml_norm(ctx0, inpFF, norm_eps);
+                offload_func(cur);
+                cur = ggml_add(ctx0,
+                    ggml_mul(ctx0, cur, model.layers[il].ffn_norm),
+                    model.layers[il].ffn_norm_b
+                );
+                ggml_set_name(cur, "ffn_norm");
+                offload_func(cur);
+            }
+            cur = ggml_mul_mat(ctx0, model.layers[il].w3, cur);
+            offload_func(cur);
+
+            cur = ggml_add(ctx0, cur, model.layers[il].b3);
+            offload_func(cur);
+            ggml_set_name(cur, "result_ffn_up");
+
+            cur = ggml_sqr(ctx0, ggml_relu(ctx0, cur));
+            ggml_set_name(cur, "result_ffn_act");
+            offload_func(cur);
+            offload_func(cur->src[0]);
+
+            cur = ggml_mul_mat(ctx0, model.layers[il].w2, cur);
+            offload_func(cur);
+            cur = ggml_add(ctx0,
+                cur,
+                model.layers[il].b2);
+            offload_func(cur);
+            ggml_set_name(cur, "outFF");
+        }
+        cur = ggml_add(ctx0, cur, inpFF);
+        offload_func(cur);
+        ggml_set_name(cur, "inpFF_+_outFF");
+        inpL = cur;
+    }
+    cur = inpL;
+    {
+        cur = ggml_norm(ctx0, cur, norm_eps);
+        offload_func_nr(cur);
+        cur = ggml_mul(ctx0, cur, model.output_norm);
+        offload_func_nr(cur);
+
+        cur = ggml_add(ctx0, cur, model.output_norm_b);
+        // offload_func_nr(cur);
+
+        ggml_set_name(cur, "result_norm");
+    }
+    cur = ggml_mul_mat(ctx0, model.output, cur);
+    ggml_set_name(cur, "result_output");
+    ggml_build_forward_expand(gf, cur);
+    ggml_free(ctx0);
+    return gf;
+}
+
 static struct ggml_cgraph * llama_build_graph(
          llama_context & lctx,
      const llama_batch & batch) {
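The new `llm_build_persimmon` builder above applies rotary position embedding only to the first `n_rot = n_embd_head / 2` components of each query/key head ("qrot"/"krot"), passes the remaining components through unchanged ("qpass"/"kpass"), and then concatenates the two halves again. A minimal scalar sketch of that split for a single head vector at position `pos`; the pairing follows the GPT-NeoX convention and is a simplification of what `ggml_rope_custom` computes, not a drop-in replacement for it:

#include <cmath>
#include <vector>

// Rotate only the first n_rot components of one head vector; leave the rest as-is.
static std::vector<float> partial_rope(const std::vector<float> & head, int n_rot, int pos, float freq_base) {
    std::vector<float> out(head);                    // components [n_rot, n_embd_head) pass through
    for (int i = 0; i < n_rot/2; ++i) {
        const float theta = pos * std::pow(freq_base, -2.0f*float(i)/float(n_rot));
        const float x0 = head[i];
        const float x1 = head[i + n_rot/2];
        out[i]           = x0*std::cos(theta) - x1*std::sin(theta);
        out[i + n_rot/2] = x0*std::sin(theta) + x1*std::cos(theta);
    }
    return out;
}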
@@ -4372,6 +4923,10 @@ static struct ggml_cgraph * llama_build_graph(
             {
                 result = llm_build_starcoder(lctx, batch);
             } break;
+        case LLM_ARCH_PERSIMMON:
+            {
+                result = llm_build_persimmon(lctx, batch);
+            } break;
         case LLM_ARCH_REFACT:
             {
                 result = llm_build_refact(lctx, batch);
@@ -4454,10 +5009,6 @@ static int llama_decode_internal(
         batch.seq_id = seq_id.data();
     }
 
-    // we always start to search for a free slot from the start of the cache
-    // TODO: better strategies can be implemented
-    kv_self.head = 0;
-
     if (!llama_kv_cache_find_slot(kv_self, batch)) {
         return 1;
     }
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
// update the kv ring buffer
|
// update the kv ring buffer
|
||||||
lctx.kv_self.head += n_tokens;
|
|
||||||
lctx.kv_self.has_shift = false;
|
lctx.kv_self.has_shift = false;
|
||||||
|
lctx.kv_self.head += n_tokens;
|
||||||
|
// Ensure kv cache head points to a valid index.
|
||||||
|
if (lctx.kv_self.head >= lctx.kv_self.size) {
|
||||||
|
lctx.kv_self.head = 0;
|
||||||
|
}
|
||||||
|
|
||||||
#ifdef GGML_PERF
|
#ifdef GGML_PERF
|
||||||
// print timing information per ggml operation (for debugging purposes)
|
// print timing information per ggml operation (for debugging purposes)
|
||||||
|
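The hunk above reorders the ring-buffer update in `llama_decode_internal`: the head is now advanced after `has_shift` is cleared, and it wraps back to zero once it reaches the cache size so the next slot search starts from a valid index. A tiny standalone sketch of the wraparound, with `kv_size` standing in for `lctx.kv_self.size`:

#include <cstdint>

// Advance the KV cache head by n_tokens and wrap it so it stays a valid index.
static uint32_t advance_kv_head(uint32_t head, uint32_t n_tokens, uint32_t kv_size) {
    head += n_tokens;
    if (head >= kv_size) {
        head = 0;
    }
    return head;
}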
@@ -6639,6 +7194,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
     }
 
     std::ofstream fout(fname_out, std::ios::binary);
+    fout.exceptions(std::ofstream::failbit); // fail fast on write errors
 
     const size_t meta_size = gguf_get_meta_size(ctx_out);
 
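`fout.exceptions(std::ofstream::failbit)` makes the quantization output stream throw `std::ios_base::failure` on the first failed write instead of silently continuing with a bad stream. A small standalone illustration of the same idiom (the output file name below is just a placeholder):

#include <fstream>
#include <iostream>

int main() {
    try {
        std::ofstream fout("out.gguf", std::ios::binary);
        fout.exceptions(std::ofstream::failbit);   // fail fast on write errors
        fout.write("data", 4);
    } catch (const std::ios_base::failure & err) {
        std::cerr << "write failed: " << err.what() << "\n";
        return 1;
    }
    return 0;
}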
@@ -8133,7 +8689,9 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
             buf[0] = llama_token_to_byte(model->vocab, token);
             return 1;
         } else {
-            GGML_ASSERT(false);
+            // TODO: for now we accept all unsupported token types,
+            // suppressing them like CONTROL tokens.
+            // GGML_ASSERT(false);
         }
         break;
     }
@@ -8149,7 +8707,9 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
         } else if (llama_is_control_token(model->vocab, token)) {
             ;
         } else {
-            GGML_ASSERT(false);
+            // TODO: for now we accept all unsupported token types,
+            // suppressing them like CONTROL tokens.
+            // GGML_ASSERT(false);
        }
         break;
     }
@@ -8181,14 +8741,14 @@ void llama_print_timings(struct llama_context * ctx) {
     const llama_timings timings = llama_get_timings(ctx);
 
     LLAMA_LOG_INFO("\n");
-    LLAMA_LOG_INFO("%s: load time = %8.2f ms\n", __func__, timings.t_load_ms);
-    LLAMA_LOG_INFO("%s: sample time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+    LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, timings.t_load_ms);
+    LLAMA_LOG_INFO("%s: sample time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
             __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
-    LLAMA_LOG_INFO("%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
+    LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
             __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
-    LLAMA_LOG_INFO("%s: eval time = %8.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
+    LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n",
             __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
-    LLAMA_LOG_INFO("%s: total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
+    LLAMA_LOG_INFO("%s: total time = %10.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
 }
 
 void llama_reset_timings(struct llama_context * ctx) {
49 prompts/LLM-questions.txt Normal file
@@ -0,0 +1,49 @@
+In the context of LLMs, what is "Attention"?
+In the context of LLMs, what is a completion?
+In the context of LLMs, what is a prompt?
+In the context of LLMs, what is GELU?
+In the context of LLMs, what is RELU?
+In the context of LLMs, what is softmax?
+In the context of LLMs, what is decoding?
+In the context of LLMs, what is encoding?
+In the context of LLMs, what is tokenizing?
+In the context of LLMs, what is an embedding?
+In the context of LLMs, what is quantization?
+In the context of LLMs, what is a tensor?
+In the context of LLMs, what is a sparse tensor?
+In the context of LLMs, what is a vector?
+In the context of LLMs, how is attention implemented?
+In the context of LLMs, why is attention all you need?
+In the context of LLMs, what is "RoPe" and what is it used for?
+In the context of LLMs, what is "LoRA" and what is it used for?
+In the context of LLMs, what are weights?
+In the context of LLMs, what are biases?
+In the context of LLMs, what are checkpoints?
+In the context of LLMs, what is "perplexity"?
+In the context of LLMs, what are models?
+In the context of machine-learning, what is "catastrophic forgetting"?
+In the context of machine-learning, what is "elastic weight consolidation (EWC)"?
+In the context of neural nets, what is a hidden layer?
+In the context of neural nets, what is a convolution?
+In the context of neural nets, what is dropout?
+In the context of neural nets, what is cross-entropy?
+In the context of neural nets, what is over-fitting?
+In the context of neural nets, what is under-fitting?
+What is the difference between an interpreted computer language and a compiled computer language?
+In the context of software development, what is a debugger?
+When processing using a GPU, what is off-loading?
+When processing using a GPU, what is a batch?
+When processing using a GPU, what is a block?
+When processing using a GPU, what is the difference between a batch and a block?
+When processing using a GPU, what is a scratch tensor?
+When processing using a GPU, what is a layer?
+When processing using a GPU, what is a cache?
+When processing using a GPU, what is unified memory?
+When processing using a GPU, what is VRAM?
+When processing using a GPU, what is a kernel?
+When processing using a GPU, what is "metal"?
+In the context of LLMs, what are "Zero-Shot", "One-Shot" and "Few-Shot" learning models?
+In the context of LLMs, what is the "Transformer-model" architecture?
+In the context of LLMs, what is "Multi-Head Attention"?
+In the context of LLMs, what is "Self-Attention"?
+In the context of transformer-model architectures, how do attention mechanisms use masks?
43 prompts/parallel-questions.txt Normal file
@@ -0,0 +1,43 @@
+What do you know about Hobbits?
+What is quantum field theory?
+Why did the chicken cross the road?
+Who is the president of the United States?
+How do I run CMake on MacOS?
+Do you agree that C++ is a really finicky language compared with Python3?
+Is it a good idea to invest in technology?
+Do you like Wagner's Ring?
+Do you think this file input option is really neat?
+What should we all do about climate change?
+Is time-travel possible within the laws of current physics?
+Is it like anything to be a bat?
+Once the chicken has crossed the road, does it try to go back?
+Who is the greatest of all musical composers?
+What is art?
+Is there life elsewhere in the universe?
+What is intelligence?
+What is the difference between knowledge and intelligence?
+Will religion ever die?
+Do we understand ourselves?
+What is the best way to cook eggs?
+If you cannot see things, on what basis do you evaluate them?
+Explain the role of the np junction in photovoltaic cells?
+Is professional sport a good or bad influence on human behaviour?
+Is capital punishment immoral?
+Should we care about other people?
+Who are you?
+Which sense would you surrender if you could?
+Was Henry Ford a hero or a villain?
+Do we need leaders?
+What is nucleosynthesis?
+Who is the greatest scientist of all time?
+Who first observed what came to be known as the photovoltaic effect?
+What is nuclear fusion and why does it release energy?
+Can you know that you exist?
+What is an exoplanet?
+Do you like cream?
+What is the difference?
+Can I know that I exist while I'm dreaming that I'm Descartes?
+Who said "I didn't know I thought that until I heard myself saying it"?
+Does anything really matter?
+Can you explain the unreasonable effectiveness of mathematics?
@@ -1,3 +1,3 @@
-numpy==1.24
+numpy==1.24.4
 sentencepiece==0.1.98
 gguf>=0.1.0