Merge branch 'ggerganov:master' into master
commit 472a9b8be5
46 changed files with 1625 additions and 1144 deletions
@@ -140,6 +140,7 @@ Typically finetunes of the base models below are supported as well.
 - [x] [MobileVLM 1.7B/3B models](https://huggingface.co/models?search=mobileVLM)
 - [x] [Yi-VL](https://huggingface.co/models?search=Yi-VL)
 - [x] [Mini CPM](https://huggingface.co/models?search=MiniCPM)
+- [x] [Moondream](https://huggingface.co/vikhyatk/moondream2)

 **HTTP server**

@@ -901,6 +901,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         params.interactive = true;
         return true;
     }
+    if (arg == "--interactive-specials") {
+        params.interactive_specials = true;
+        return true;
+    }
     if (arg == "--embedding") {
         params.embedding = true;
         return true;
@@ -1367,15 +1371,13 @@ bool gpt_params_parse_ex(int argc, char ** argv, gpt_params & params) {
         if (arg.compare(0, arg_prefix.size(), arg_prefix) == 0) {
             std::replace(arg.begin(), arg.end(), '_', '-');
         }

         if (!gpt_params_find_arg(argc, argv, arg, params, i, invalid_param)) {
             throw std::invalid_argument("error: unknown argument: " + arg);
         }
-    }

         if (invalid_param) {
             throw std::invalid_argument("error: invalid parameter for argument: " + arg);
         }
+    }

     if (params.prompt_cache_all &&
         (params.interactive || params.interactive_first ||
@@ -1422,6 +1424,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     printf(" -h, --help show this help message and exit\n");
     printf(" --version show version and build info\n");
     printf(" -i, --interactive run in interactive mode\n");
+    printf(" --interactive-specials allow special tokens in user text, in interactive mode\n");
     printf(" --interactive-first run in interactive mode and wait for input right away\n");
     printf(" -cnv, --conversation run in conversation mode (does not print special tokens and suffix/prefix)\n");
     printf(" -ins, --instruct run in instruction mode (use with Alpaca models)\n");
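The new flag plugs into the shared argument parsing used by the `main` example, so it can be combined with interactive mode. A minimal sketch of an invocation (the model path is a placeholder, not taken from this diff):

```bash
./main -m models/7B/ggml-model-q4_0.gguf -i --interactive-specials
```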
@@ -2652,6 +2655,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
     dump_string_yaml_multiline(stream, "in_suffix", params.input_prefix.c_str());
     fprintf(stream, "instruct: %s # default: false\n", params.instruct ? "true" : "false");
     fprintf(stream, "interactive: %s # default: false\n", params.interactive ? "true" : "false");
+    fprintf(stream, "interactive_specials: %s # default: false\n", params.interactive_specials ? "true" : "false");
     fprintf(stream, "interactive_first: %s # default: false\n", params.interactive_first ? "true" : "false");
     fprintf(stream, "keep: %d # default: 0\n", params.n_keep);
     fprintf(stream, "logdir: %s # default: unset (no logging)\n", params.logdir.c_str());
@@ -140,6 +140,7 @@ struct gpt_params {
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color = false; // use color to distinguish generations and inputs
     bool interactive = false; // interactive mode
+    bool interactive_specials = false; // whether to allow special tokens from user, during interactive mode
     bool conversation = false; // conversation mode (does not print special tokens and suffix/prefix)
     bool chatml = false; // chatml mode (used for models trained on chatml syntax)
     bool prompt_cache_all = false; // save user input and generations to prompt cache
@@ -142,6 +142,9 @@ namespace grammar_parser {
             pos++;
             last_sym_start = out_elements.size();
             while (*pos != '"') {
+                if (!*pos) {
+                    throw std::runtime_error("unexpected end of input");
+                }
                 auto char_pair = parse_char(pos);
                 pos = char_pair.second;
                 out_elements.push_back({LLAMA_GRETYPE_CHAR, char_pair.first});
@@ -156,6 +159,9 @@ namespace grammar_parser {
             }
             last_sym_start = out_elements.size();
             while (*pos != ']') {
+                if (!*pos) {
+                    throw std::runtime_error("unexpected end of input");
+                }
                 auto char_pair = parse_char(pos);
                 pos = char_pair.second;
                 enum llama_gretype type = last_sym_start < out_elements.size()
@@ -164,6 +170,9 @@ namespace grammar_parser {

                 out_elements.push_back({type, char_pair.first});
                 if (pos[0] == '-' && pos[1] != ']') {
+                    if (!pos[1]) {
+                        throw std::runtime_error("unexpected end of input");
+                    }
                     auto endchar_pair = parse_char(pos + 1);
                     pos = endchar_pair.second;
                     out_elements.push_back({LLAMA_GRETYPE_CHAR_RNG_UPPER, endchar_pair.first});
@@ -35,7 +35,7 @@ struct llama_sampling_context * llama_sampling_init(const struct llama_sampling_

     result->prev.resize(params.n_prev);

-    result->n_considered = 0;
+    result->n_valid = 0;

     llama_sampling_set_rng_seed(result, params.seed);

@@ -66,7 +66,7 @@ void llama_sampling_reset(llama_sampling_context * ctx) {

     std::fill(ctx->prev.begin(), ctx->prev.end(), 0);
     ctx->cur.clear();
-    ctx->n_considered = 0;
+    ctx->n_valid = 0;
 }

 void llama_sampling_set_rng_seed(struct llama_sampling_context * ctx, uint32_t seed) {
@@ -256,7 +256,7 @@ static llama_token llama_sampling_sample_impl(
         }
     }

-    ctx_sampling->n_considered = cur_p.size;
+    ctx_sampling->n_valid = temp == 0.0f ? 0 : cur_p.size;

     return id;
 }
@@ -81,7 +81,7 @@ struct llama_sampling_context {
     // TODO: replace with ring-buffer
     std::vector<llama_token> prev;
     std::vector<llama_token_data> cur;
-    size_t n_considered;
+    size_t n_valid; // Number of correct top tokens with correct probabilities.

     std::mt19937 rng;
 };
@@ -74,6 +74,9 @@ models = [
     {"name": "qwen2", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/Qwen/Qwen1.5-7B", },
     {"name": "olmo", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/allenai/OLMo-1.7-7B-hf", },
     {"name": "dbrx", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/databricks/dbrx-base", },
+    {"name": "jina-en", "tokt": TOKENIZER_TYPE.WPM, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-en", }, # WPM!
+    {"name": "jina-es", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-es", },
+    {"name": "jina-de", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/jinaai/jina-embeddings-v2-base-de", },
 ]

 # make directory "models/tokenizers" if it doesn't exist
@@ -142,8 +145,17 @@ for model in models:
     if tokt == TOKENIZER_TYPE.SPM:
         continue

+    # Skip if the tokenizer folder does not exist or there are other download issues previously
+    if not os.path.exists(f"models/tokenizers/{name}"):
+        logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
+        continue
+
     # create the tokenizer
+    try:
         tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
+    except OSError as e:
+        logger.error(f"Error loading tokenizer for model {name}. The model may not exist or is not accessible with the provided token. Error: {e}")
+        continue  # Skip to the next model if the tokenizer can't be loaded

     chktok = tokenizer.encode(chktxt)
     chkhsh = sha256(str(chktok).encode()).hexdigest()
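These guards let the update script continue past tokenizers that could not be downloaded earlier. As a rough sketch of how the script is normally run (the exact invocation is an assumption; the diff only shows that a Hugging Face token is used to fetch tokenizer files into `models/tokenizers/`):

```bash
python3 convert-hf-to-gguf-update.py <huggingface_read_token>
```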
@@ -161,6 +173,8 @@ for model in models:
     logger.info("normalizer: " + json.dumps(normalizer, indent=4))
     pre_tokenizer = cfg["pre_tokenizer"]
     logger.info("pre_tokenizer: " + json.dumps(pre_tokenizer, indent=4))
+    if "ignore_merges" in cfg["model"]:
+        logger.info("ignore_merges: " + json.dumps(cfg["model"]["ignore_merges"], indent=4))

     logger.info("")

@@ -282,8 +296,17 @@ for model in models:
     name = model["name"]
     tokt = model["tokt"]

+    # Skip if the tokenizer folder does not exist or there are other download issues previously
+    if not os.path.exists(f"models/tokenizers/{name}"):
+        logger.warning(f"Directory for tokenizer {name} not found. Skipping...")
+        continue
+
     # create the tokenizer
+    try:
         tokenizer = AutoTokenizer.from_pretrained(f"models/tokenizers/{name}")
+    except OSError as e:
+        logger.error(f"Failed to load tokenizer for model {name}. Error: {e}")
+        continue  # Skip this model and continue with the next one in the loop

     with open(f"models/ggml-vocab-{name}.gguf.inp", "w", encoding="utf-8") as f:
         for text in tests:
@@ -12,7 +12,7 @@ import sys
 from enum import IntEnum
 from pathlib import Path
 from hashlib import sha256
-from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast, overload
+from typing import TYPE_CHECKING, Any, Callable, ContextManager, Iterable, Iterator, Sequence, TypeVar, cast

 import numpy as np
 import torch
@@ -48,7 +48,6 @@ class Model:

     dir_model: Path
     ftype: int
-    fname_out: Path
     is_big_endian: bool
     endianess: gguf.GGUFEndian
     use_temp_file: bool
@@ -56,20 +55,20 @@ class Model:
     part_names: list[str]
     is_safetensors: bool
     hparams: dict[str, Any]
-    gguf_writer: gguf.GGUFWriter
     block_count: int
     tensor_map: gguf.TensorNameMap
     tensor_names: set[str] | None
+    fname_out: Path
+    gguf_writer: gguf.GGUFWriter

     # subclasses should define this!
     model_arch: gguf.MODEL_ARCH

-    def __init__(self, dir_model: Path, ftype: int, fname_out: Path, is_big_endian: bool, use_temp_file: bool, eager: bool):
+    def __init__(self, dir_model: Path, ftype: gguf.LlamaFileType, fname_out: Path, is_big_endian: bool, use_temp_file: bool, eager: bool):
-        if self.__class__ == Model:
+        if type(self) is Model:
-            raise TypeError(f"{self.__class__.__name__!r} should not be directly instantiated")
+            raise TypeError(f"{type(self).__name__!r} should not be directly instantiated")
         self.dir_model = dir_model
         self.ftype = ftype
-        self.fname_out = fname_out
         self.is_big_endian = is_big_endian
         self.endianess = gguf.GGUFEndian.BIG if is_big_endian else gguf.GGUFEndian.LITTLE
         self.use_temp_file = use_temp_file
@@ -79,10 +78,23 @@ class Model:
         if not self.is_safetensors:
             self.part_names = Model.get_model_part_names(self.dir_model, ".bin")
         self.hparams = Model.load_hparams(self.dir_model)
-        self.gguf_writer = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file)
         self.block_count = self.find_hparam(["n_layers", "num_hidden_layers", "n_layer"])
         self.tensor_map = gguf.get_tensor_name_map(self.model_arch, self.block_count)
         self.tensor_names = None
+        if self.ftype == gguf.LlamaFileType.GUESSED:
+            # NOTE: can't use field "torch_dtype" in config.json, because some finetunes lie.
+            _, first_tensor = next(self.get_tensors())
+            if first_tensor.dtype == torch.float16:
+                logger.info(f"choosing --outtype f16 from first tensor type ({first_tensor.dtype})")
+                self.ftype = gguf.LlamaFileType.MOSTLY_F16
+            else:
+                logger.info(f"choosing --outtype bf16 from first tensor type ({first_tensor.dtype})")
+                self.ftype = gguf.LlamaFileType.MOSTLY_BF16
+        ftype_up: str = self.ftype.name.partition("_")[2].upper()
+        ftype_lw: str = ftype_up.lower()
+        # allow templating the file name with the output ftype, useful with the "auto" ftype
+        self.fname_out = fname_out.parent / fname_out.name.format(ftype_lw, outtype=ftype_lw, ftype=ftype_lw, OUTTYPE=ftype_up, FTYPE=ftype_up)
+        self.gguf_writer = gguf.GGUFWriter(self.fname_out, gguf.MODEL_ARCH_NAMES[self.model_arch], endianess=self.endianess, use_temp_file=self.use_temp_file)

     @classmethod
     def __init_subclass__(cls):
@@ -142,14 +154,27 @@ class Model:
             raise ValueError(f"Mismatch between weight map and model parts for tensor names: {sym_diff}")

     def format_tensor_name(self, key: gguf.MODEL_TENSOR, bid: int | None = None, suffix: str = ".weight") -> str:
-        name: str = gguf.TENSOR_NAMES[key]
         if key not in gguf.MODEL_TENSORS[self.model_arch]:
             raise ValueError(f"Missing {key!r} for MODEL_TENSORS of {self.model_arch!r}")
+        name: str = gguf.TENSOR_NAMES[key]
         if "{bid}" in name:
             assert bid is not None
             name = name.format(bid=bid)
         return name + suffix

+    def match_model_tensor_name(self, name: str, key: gguf.MODEL_TENSOR, bid: int | None, suffix: str = ".weight") -> bool:
+        if key not in gguf.MODEL_TENSORS[self.model_arch]:
+            return False
+        key_name: str = gguf.TENSOR_NAMES[key]
+        if "{bid}" in key_name:
+            if bid is None:
+                return False
+            key_name = key_name.format(bid=bid)
+        else:
+            if bid is not None:
+                return False
+        return name == (key_name + suffix)
+
     def map_tensor_name(self, name: str, try_suffixes: Sequence[str] = (".weight", ".bias")) -> str:
         new_name = self.tensor_map.get_name(key=name, try_suffixes=try_suffixes)
         if new_name is None:
@@ -215,6 +240,23 @@ class Model:
         return False

     def write_tensors(self):
+        # same as ggml_compute_fp32_to_bf16 in ggml-impl.h
+        def np_fp32_to_bf16(n: np.ndarray):
+            # force nan to quiet
+            n = np.where((n & 0x7fffffff) > 0x7f800000, (n & 0xffff0000) | (64 << 16), n)
+            # flush subnormals to zero
+            n = np.where((n & 0x7f800000) == 0, n & 0x80000000, n)
+            # round to nearest even
+            n = (n + (0x7fff + ((n >> 16) & 1))) >> 16
+            return n.astype(np.int16)
+
+        # Doing this row-wise is much, much faster than element-wise, hence the signature
+        v_fp32_to_bf16 = np.vectorize(np_fp32_to_bf16, otypes=[np.int16], signature="(n)->(n)")
+        if self.lazy:
+            # TODO: find a way to implicitly wrap np.vectorize functions
+            # NOTE: the type is changed to reflect otypes passed to np.vectorize above
+            v_fp32_to_bf16 = gguf.LazyNumpyTensor._wrap_fn(v_fp32_to_bf16, meta_noop=np.int16)

         max_name_len = max(len(s) for _, s in self.tensor_map.mapping.values()) + len(".weight,")

         for name, data_torch in self.get_tensors():
@@ -239,35 +281,60 @@ class Model:
             data: np.ndarray = data  # type hint
             n_dims = len(data.shape)
             data_dtype = data.dtype
+            data_qtype: gguf.GGMLQuantizationType | None = None

-            # if f32 desired, convert any float16 to float32
-            if self.ftype == 0 and data_dtype == np.float16:
-                data = data.astype(np.float32)

             # when both are True, f32 should win
             extra_f32 = self.extra_f32_tensors(name, new_name, bid, n_dims)
             extra_f16 = self.extra_f16_tensors(name, new_name, bid, n_dims)

             # Most of the codebase that takes in 1D tensors or norms only handles F32 tensors
-            extra_f32 = extra_f32 or n_dims == 1 or new_name.endswith("_norm.weight")
+            # Conditions should closely match those in llama_model_quantize_internal in llama.cpp
+            extra_f32 = any(cond for cond in (
+                extra_f32,
+                n_dims == 1,
+                new_name.endswith("_norm.weight"),
+            ))

+            # Some tensor types are always in float32
+            extra_f32 = extra_f32 or any(self.match_model_tensor_name(new_name, key, bid) for key in (
+                gguf.MODEL_TENSOR.FFN_GATE_INP,
+                gguf.MODEL_TENSOR.POS_EMBD,
+                gguf.MODEL_TENSOR.TOKEN_TYPES,
+            ))

             # if f16 desired, convert any float32 2-dim weight tensors to float16
-            extra_f16 = extra_f16 or (name.endswith(".weight") and n_dims >= 2)
+            extra_f16 = any(cond for cond in (
+                extra_f16,
+                (name.endswith(".weight") and n_dims >= 2),
+            ))

-            # when both extra_f32 and extra_f16 are False, convert to float32 by default
-            if self.ftype == 1 and data_dtype == np.float16 and (extra_f32 or not extra_f16):
-                data = data.astype(np.float32)
-
-            if self.ftype == 1 and data_dtype == np.float32 and extra_f16 and not extra_f32:
-                data = data.astype(np.float16)
+            if self.ftype != gguf.LlamaFileType.ALL_F32 and extra_f16 and not extra_f32:
+                if self.ftype == gguf.LlamaFileType.MOSTLY_F16:
+                    if data_dtype != np.float16:
+                        data = data.astype(np.float16)
+                    data_qtype = gguf.GGMLQuantizationType.F16
+
+                elif self.ftype == gguf.LlamaFileType.MOSTLY_BF16:
+                    if data_dtype != np.float32:
+                        data = data.astype(np.float32)
+                    data = v_fp32_to_bf16(data.view(np.int32))
+                    assert data.dtype == np.int16
+                    data_qtype = gguf.GGMLQuantizationType.BF16
+
+            else:  # by default, convert to float32
+                if data_dtype != np.float32:
+                    data = data.astype(np.float32)
+                data_qtype = gguf.GGMLQuantizationType.F32
+
+            assert data_qtype is not None

             # reverse shape to make it similar to the internal ggml dimension order
             shape_str = f"{{{', '.join(str(n) for n in reversed(data.shape))}}}"

             # n_dims is implicit in the shape
-            logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data.dtype}, shape = {shape_str}")
+            logger.info(f"{f'%-{max_name_len}s' % f'{new_name},'} {old_dtype} --> {data_qtype.name}, shape = {shape_str}")

-            self.gguf_writer.add_tensor(new_name, data)
+            self.gguf_writer.add_tensor(new_name, data, raw_dtype=data_qtype)

     def write(self):
         self.write_tensors()
@@ -404,8 +471,17 @@ class Model:
             # ref: https://huggingface.co/allenai/OLMo-1.7-7B-hf
             res = "olmo"
         if chkhsh == "a8594e3edff7c29c003940395316294b2c623e09894deebbc65f33f1515df79e":
-            # ref: https://huggingface.co/databricks/dbrx-instruct
+            # ref: https://huggingface.co/databricks/dbrx-base
             res = "dbrx"
+        if chkhsh == "0876d13b50744004aa9aeae05e7b0647eac9d801b5ba4668afc01e709c15e19f":
+            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-en
+            res = "jina-en"
+        if chkhsh == "171aeeedd6fb548d418a7461d053f11b6f1f1fc9b387bd66640d28a4b9f5c643":
+            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-es
+            res = "jina-es"
+        if chkhsh == "27949a2493fc4a9f53f5b9b029c82689cfbe5d3a1929bb25e043089e28466de6":
+            # ref: https://huggingface.co/jinaai/jina-embeddings-v2-base-de
+            res = "jina-de"

         if res is None:
             logger.warning("\n")
@@ -1013,6 +1089,18 @@ class StarCoderModel(Model):
 class RefactModel(Model):
     model_arch = gguf.MODEL_ARCH.REFACT

+    def set_vocab(self):
+        super().set_vocab()
+
+        # TODO: how to determine special FIM tokens automatically?
+        special_vocab = gguf.SpecialVocab(self.dir_model, load_merges=False,
+                                          special_token_types = ['prefix', 'suffix', 'middle', 'fsep', 'eot'])
+        special_vocab._set_special_token("prefix", 1)
+        special_vocab._set_special_token("suffix", 3)
+        special_vocab._set_special_token("middle", 2)
+        special_vocab._set_special_token("fsep", 4) # is this correct?
+        special_vocab.add_to_gguf(self.gguf_writer)
+
     def set_gguf_parameters(self):
         hidden_dim = self.hparams["n_embd"]
         inner_dim = 4 * hidden_dim
@@ -2023,12 +2111,6 @@ class BertModel(Model):

         return [(self.map_tensor_name(name), data_torch)]

-    def extra_f32_tensors(self, name: str, new_name: str, bid: int | None, n_dims: int) -> bool:
-        del new_name, bid, n_dims  # unused
-
-        # not used with get_rows, must be F32
-        return name == "embeddings.token_type_embeddings.weight"
-

 @Model.register("NomicBertModel")
 class NomicBertModel(BertModel):
@@ -2277,96 +2359,81 @@ class OlmoModel(Model):
         return [(self.map_tensor_name(name), data_torch)]


+@Model.register("JinaBertModel", "JinaBertForMaskedLM")
+class JinaBertV2Model(BertModel):
+    model_arch = gguf.MODEL_ARCH.JINA_BERT_V2
+
+    def __init__(self, *args, **kwargs):
+        super().__init__(*args, **kwargs)
+        self.intermediate_size = self.hparams["intermediate_size"]
+
+    def get_tensors(self):
+        for name, data in super().get_tensors():
+            if 'gated_layers' in name:
+                d1 = data[:self.intermediate_size, :]
+                name1 = name.replace('gated_layers', 'gated_layers_w')
+                d2 = data[self.intermediate_size:, :]
+                name2 = name.replace('gated_layers', 'gated_layers_v')
+                yield name1, d1
+                yield name2, d2
+                continue
+
+            yield name, data
+
+    def set_vocab(self, *args, **kwargs):
+        tokenizer_class = 'BertTokenizer'
+        with open(self.dir_model / "tokenizer_config.json", "r", encoding="utf-8") as f:
+            tokenizer_class = json.load(f)['tokenizer_class']
+
+        if tokenizer_class == 'BertTokenizer':
+            super().set_vocab()
+        elif tokenizer_class == 'RobertaTokenizer':
+            self._set_vocab_gpt2()
+            self.gguf_writer.add_token_type_count(2)
+        else:
+            raise NotImplementedError(f'Tokenizer {tokenizer_class} is not supported for JinaBertModel')
+        self.gguf_writer.add_add_bos_token(True)
+        self.gguf_writer.add_add_eos_token(True)
+
+
 ###### CONVERSION LOGIC ######


 # tree of lazy tensors
-class LazyTorchTensor:
+class LazyTorchTensor(gguf.LazyBase):
-    _meta: Tensor
+    _tensor_type = torch.Tensor
-    _data: Tensor | None
+    # to keep the type-checker happy
-    _args: tuple
+    dtype: torch.dtype
-    _func: Callable[[tuple], Tensor] | None
+    shape: torch.Size

-    def __init__(self, *, meta: Tensor, data: Tensor | None = None, args: tuple = (), func: Callable[[tuple], Tensor] | None = None):
-        self._meta = meta
-        self._data = data
-        self._args = args
-        self._func = func
-
-    @staticmethod
-    def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
-        # TODO: dict and set
-        if isinstance(o, (list, tuple)):
-            L = []
-            for item in o:
-                L.append(LazyTorchTensor._recurse_apply(item, fn))
-            if isinstance(o, tuple):
-                L = tuple(L)
-            return L
-        elif isinstance(o, LazyTorchTensor):
-            return fn(o)
-        else:
-            return o
-
-    def _wrap_fn(self, fn: Callable, use_self: bool = False) -> Callable[[Any], LazyTorchTensor]:
-        def wrapped_fn(*args, **kwargs):
-            if kwargs is None:
-                kwargs = {}
-            args = ((self,) if use_self else ()) + args
-
-            meta_args = LazyTorchTensor._recurse_apply(args, lambda t: t._meta)
-
-            return LazyTorchTensor(meta=fn(*meta_args, **kwargs), args=args, func=lambda a: fn(*a, **kwargs))
-        return wrapped_fn
-
-    def __getattr__(self, __name: str) -> Any:
-        meta_attr = getattr(self._meta, __name)
-        if callable(meta_attr):
-            return self._wrap_fn(getattr(torch.Tensor, __name), use_self=True)
-        elif isinstance(meta_attr, torch.Tensor):
-            # for things like self.T
-            return self._wrap_fn(lambda s: getattr(s, __name))(self)
-        else:
-            return meta_attr
-
+    # only used when converting a torch.Tensor to a np.ndarray
     _dtype_map: dict[torch.dtype, type] = {
         torch.float16: np.float16,
         torch.float32: np.float32,
     }

-    def numpy(self) -> gguf.LazyTensor:
+    def numpy(self) -> gguf.LazyNumpyTensor:
         dtype = self._dtype_map[self.dtype]
-        return gguf.LazyTensor(lambda: LazyTorchTensor.to_eager(self).numpy(), dtype=dtype, shape=self.shape)
+        return gguf.LazyNumpyTensor(
+            meta=np.lib.stride_tricks.as_strided(np.zeros(1, dtype), self.shape, (0 for _ in self.shape)),
+            lazy=self._lazy,
+            args=(self,),
+            func=(lambda s: s[0].numpy())
+        )

-    @overload
-    @staticmethod
-    def to_eager(t: Tensor | LazyTorchTensor) -> Tensor: ...
-
-    @overload
-    @staticmethod
-    def to_eager(t: tuple) -> tuple: ...
-
-    @staticmethod
-    def to_eager(t: Any) -> Any:
-        def simple_to_eager(_t: LazyTorchTensor) -> Tensor:
-            # wake up the lazy tensor
-            if _t._data is None and _t._func is not None:
-                # recurse into its arguments
-                _t._args = LazyTorchTensor.to_eager(_t._args)
-                _t._data = _t._func(_t._args)
-            if _t._data is not None:
-                return _t._data
-            else:
-                raise ValueError(f"Could not compute lazy tensor {_t!r} with args {_t._args!r}")
-
-        # recurse into lists and/or tuples, keeping their structure
-        return LazyTorchTensor._recurse_apply(t, simple_to_eager)
-
-    @staticmethod
-    def from_eager(t: Tensor) -> Tensor:
-        if (t.__class__ == LazyTorchTensor):
+    @classmethod
+    def eager_to_meta(cls, t: Tensor) -> Tensor:
+        if t.is_meta:
             return t
-        return LazyTorchTensor(meta=t.detach().to("meta"), data=t)  # type: ignore
+        return t.detach().to("meta")
+
+    @classmethod
+    def meta_with_dtype(cls, m: Tensor, dtype: torch.dtype) -> Tensor:
+        m = m.detach()
+        if not m.is_meta:
+            m = m.to("meta")
+        m.dtype = dtype
+        return m

     @classmethod
     def __torch_function__(cls, func, types, args=(), kwargs=None):
@@ -2377,28 +2444,8 @@ class LazyTorchTensor:

         if func is torch.Tensor.numpy:
             return args[0].numpy()
-        if func is torch.equal:
-            eager_args = LazyTorchTensor.to_eager(args)
-            return func(*eager_args, **kwargs)

-        return LazyTorchTensor._wrap_fn(args[0], func)(*args, **kwargs)
+        return LazyTorchTensor._wrap_fn(func)(*args, **kwargs)

-    # special methods bypass __getattr__, so they need to be added manually
-    # ref: https://docs.python.org/3/reference/datamodel.html#special-lookup
-    # NOTE: LazyTorchTensor can't be a subclass of Tensor (and then be used
-    # as self._meta is currently used), because then the following
-    # operations would by default not be wrapped, and so not propagated
-    # when the tensor is made eager.
-    # It's better to get non-silent errors for not-yet-supported operators.
-    # TODO: add more when needed to avoid clutter, or find a more concise way
-    def __neg__(self, *args):  # mamba
-        return self._wrap_fn(torch.Tensor.__neg__)(self, *args)
-
-    def __add__(self, *args):  # gemma
-        return self._wrap_fn(torch.Tensor.__add__)(self, *args)
-
-    def __getitem__(self, *args):  # bloom falcon refact internlm2
-        return self._wrap_fn(torch.Tensor.__getitem__)(self, *args)


 def parse_args() -> argparse.Namespace:
@@ -2414,11 +2461,11 @@ def parse_args() -> argparse.Namespace:
     )
     parser.add_argument(
         "--outfile", type=Path,
-        help="path to write to; default: based on input",
+        help="path to write to; default: based on input. {ftype} will be replaced by the outtype.",
     )
     parser.add_argument(
-        "--outtype", type=str, choices=["f32", "f16"], default="f16",
-        help="output format - use f32 for float32, f16 for float16",
+        "--outtype", type=str, choices=["f32", "f16", "bf16", "auto"], default="f16",
+        help="output format - use f32 for float32, f16 for float16, bf16 for bfloat16, auto for the highest-fidelity 16-bit float type depending on the first loaded tensor type",
     )
     parser.add_argument(
         "--bigendian", action="store_true",
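Together with the `GUESSED` file type handled in `Model.__init__` above, these options let the converter pick f16 or bf16 from the first tensor and template the output file name with the chosen type. A minimal sketch of an invocation (the model directory is a placeholder, not taken from this diff):

```bash
python convert-hf-to-gguf.py ./models/my-model --outtype auto --outfile ./models/my-model-{ftype}.gguf
```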
@@ -2472,16 +2519,18 @@ def main() -> None:
         logger.error(f'Error: {args.model} is not a directory')
         sys.exit(1)

-    ftype_map = {
-        "f32": gguf.GGMLQuantizationType.F32,
-        "f16": gguf.GGMLQuantizationType.F16,
+    ftype_map: dict[str, gguf.LlamaFileType] = {
+        "f32": gguf.LlamaFileType.ALL_F32,
+        "f16": gguf.LlamaFileType.MOSTLY_F16,
+        "bf16": gguf.LlamaFileType.MOSTLY_BF16,
+        "auto": gguf.LlamaFileType.GUESSED,
     }

     if args.outfile is not None:
         fname_out = args.outfile
     else:
         # output in the same directory as the model by default
-        fname_out = dir_model / f'ggml-model-{args.outtype}.gguf'
+        fname_out = dir_model / 'ggml-model-{ftype}.gguf'

     logger.info(f"Loading model: {dir_model.name}")

@@ -2497,14 +2546,16 @@ def main() -> None:
     logger.info("Set model tokenizer")
     model_instance.set_vocab()

+    model_instance.gguf_writer.add_quantization_version(gguf.GGML_QUANT_VERSION)
+
     if args.vocab_only:
-        logger.info(f"Exporting model vocab to '{fname_out}'")
+        logger.info(f"Exporting model vocab to '{model_instance.fname_out}'")
         model_instance.write_vocab()
     else:
-        logger.info(f"Exporting model to '{fname_out}'")
+        logger.info(f"Exporting model to '{model_instance.fname_out}'")
         model_instance.write()

-    logger.info(f"Model successfully exported to '{fname_out}'")
+    logger.info(f"Model successfully exported to '{model_instance.fname_out}'")


 if __name__ == '__main__':
docs/debugging-tests.md (new file, 88 lines)
@@ -0,0 +1,88 @@
# Debugging Tests Tips

## How to run & debug a specific test without anything else to keep the feedback loop short?

There is a script called debug-test.sh in the scripts folder whose parameter takes a REGEX and an optional test number.

For example, running the following command will output an interactive list from which you can select a test. It takes this form:

`debug-test.sh [OPTION]... <test_regex> <test_number>`

It will then build & run in the debugger for you.

```bash
./scripts/debug-test.sh test-tokenizer

# Once in the debugger, i.e. at the chevrons prompt, setting a breakpoint could be as follows:
>>> b main
```

For further reference use `debug-test.sh -h` to print help.


### How does the script work?
If you want to be able to use the concepts contained in the script separately, the important ones are briefly outlined below.

#### Step 1: Reset and Setup folder context

From base of this repository, let's create `build-ci-debug` as our build context.

```bash
rm -rf build-ci-debug && mkdir build-ci-debug && cd build-ci-debug
```

#### Step 2: Setup Build Environment and Compile Test Binaries

Setup and trigger a build under debug mode. You may adapt the arguments as needed, but in this case these are sane defaults.

```bash
cmake -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON ..
make -j
```

#### Step 3.1: Identify Test Command for Debugging

The output of this command will give you the command & arguments needed to run GDB.

* `-R test-tokenizer` : looks for all the test files named `test-tokenizer*` (R=Regex)
* `-N` : "show-only" disables test execution & shows test commands that you can feed to GDB.
* `-V` : Verbose Mode

```bash
ctest -R "test-tokenizer" -V -N
```

This may return output similar to below (focusing on key lines to pay attention to):

```bash
...
1: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
1: Working Directory: .
Labels: main
  Test #1: test-tokenizer-0-llama-spm
...
4: Test command: ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-falcon.gguf"
4: Working Directory: .
Labels: main
  Test #4: test-tokenizer-0-falcon
...
```

So for test #1 we can tell these two pieces of relevant information:
* Test Binary: `~/llama.cpp/build-ci-debug/bin/test-tokenizer-0`
* Test GGUF Model: `~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf`

#### Step 3.2: Run GDB on test command

Based on the ctest 'test command' report above we can then run a gdb session via this command below:

```bash
gdb --args ${Test Binary} ${Test GGUF Model}
```

Example:

```bash
gdb --args ~/llama.cpp/build-ci-debug/bin/test-tokenizer-0 "~/llama.cpp/tests/../models/ggml-vocab-llama-spm.gguf"
```
@@ -49,6 +49,12 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
         }

         float * out = output + batch.seq_id[i][0] * n_embd;
+        //TODO: I would also add a parameter here to enable normalization or not.
+        /*fprintf(stdout, "unnormalized_embedding:");
+        for (int hh = 0; hh < n_embd; hh++) {
+            fprintf(stdout, "%9.6f ", embd[hh]);
+        }
+        fprintf(stdout, "\n");*/
         llama_embd_normalize(embd, out, n_embd);
     }
 }
@@ -123,10 +129,12 @@ int main(int argc, char ** argv) {
         inputs.push_back(inp);
     }

-    // add SEP if not present
+    // check if the last token is SEP
+    // it should be automatically added by the tokenizer when 'tokenizer.ggml.add_eos_token' is set to 'true'
     for (auto & inp : inputs) {
         if (inp.empty() || inp.back() != llama_token_sep(model)) {
-            inp.push_back(llama_token_sep(model));
+            fprintf(stderr, "%s: warning: last token in the prompt is not SEP\n", __func__);
+            fprintf(stderr, "%s: 'tokenizer.ggml.add_eos_token' should be set to 'true' in the GGUF header\n", __func__);
         }
     }

@@ -26,16 +26,21 @@ options:
 -m, --model <filename> (default: models/7B/ggml-model-q4_0.gguf)
 -p, --n-prompt <n> (default: 512)
 -n, --n-gen <n> (default: 128)
--b, --batch-size <n> (default: 512)
--ctk <t>, --cache-type-k <t> (default: f16)
--ctv <t>, --cache-type-v <t> (default: f16)
--t, --threads <n> (default: 112)
+-pg <pp,tg> (default: 512,128)
+-b, --batch-size <n> (default: 2048)
+-ub, --ubatch-size <n> (default: 512)
+-ctk, --cache-type-k <t> (default: f16)
+-ctv, --cache-type-v <t> (default: f16)
+-t, --threads <n> (default: 16)
 -ngl, --n-gpu-layers <n> (default: 99)
 -sm, --split-mode <none|layer|row> (default: layer)
 -mg, --main-gpu <i> (default: 0)
 -nkvo, --no-kv-offload <0|1> (default: 0)
+-fa, --flash-attn <0|1> (default: 0)
 -mmp, --mmap <0|1> (default: 1)
--ts, --tensor_split <ts0/ts1/..> (default: 0)
+--numa <distribute|isolate|numactl> (default: disabled)
+-embd, --embeddings <0|1> (default: 0)
+-ts, --tensor-split <ts0/ts1/..> (default: 0)
 -r, --repetitions <n> (default: 5)
 -o, --output <csv|json|md|sql> (default: md)
 -v, --verbose (default: 0)

@@ -43,10 +48,11 @@ options:
 Multiple values can be given for each parameter by separating them with ',' or by specifying the parameter multiple times.
 ```

-llama-bench can perform two types of tests:
+llama-bench can perform three types of tests:

 - Prompt processing (pp): processing a prompt in batches (`-p`)
 - Text generation (tg): generating a sequence of tokens (`-n`)
+- Prompt processing + text generation (pg): processing a prompt followed by generating a sequence of tokens (`-pg`)

 With the exception of `-r`, `-o` and `-v`, all options can be specified multiple times to run multiple tests. Each pp and tg test is run with all combinations of the specified options. To specify multiple values for an option, the values can be separated by commas (e.g. `-n 16,32`), or the option can be specified multiple times (e.g. `-n 16 -n 32`).
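With the new `pg` test type documented above, a combined prompt-processing plus text-generation run can be requested alongside the existing pp and tg tests. A minimal sketch of an invocation (the model path is a placeholder, not taken from this diff):

```bash
./llama-bench -m models/7B/ggml-model-q4_0.gguf -p 512 -n 128 -pg 512,128
```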
@@ -161,10 +161,17 @@ static const char * split_mode_str(llama_split_mode mode) {
     }
 }

+static std::string pair_str(const std::pair<int, int> & p) {
+    static char buf[32];
+    snprintf(buf, sizeof(buf), "%d,%d", p.first, p.second);
+    return buf;
+}
+
 struct cmd_params {
     std::vector<std::string> model;
     std::vector<int> n_prompt;
     std::vector<int> n_gen;
+    std::vector<std::pair<int, int>> n_pg;
     std::vector<int> n_batch;
     std::vector<int> n_ubatch;
     std::vector<ggml_type> type_k;
@@ -188,6 +195,7 @@ static const cmd_params cmd_params_defaults = {
     /* model */ {"models/7B/ggml-model-q4_0.gguf"},
     /* n_prompt */ {512},
     /* n_gen */ {128},
+    /* n_pg */ {{512, 128}},
     /* n_batch */ {2048},
     /* n_ubatch */ {512},
     /* type_k */ {GGML_TYPE_F16},
@@ -215,10 +223,11 @@ static void print_usage(int /* argc */, char ** argv) {
     printf(" -m, --model <filename> (default: %s)\n", join(cmd_params_defaults.model, ",").c_str());
     printf(" -p, --n-prompt <n> (default: %s)\n", join(cmd_params_defaults.n_prompt, ",").c_str());
     printf(" -n, --n-gen <n> (default: %s)\n", join(cmd_params_defaults.n_gen, ",").c_str());
+    printf(" -pg <pp,tg> (default: %s)\n", join(transform_to_str(cmd_params_defaults.n_pg, pair_str), ",").c_str());
     printf(" -b, --batch-size <n> (default: %s)\n", join(cmd_params_defaults.n_batch, ",").c_str());
-    printf(" -ub N, --ubatch-size <n> (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str());
-    printf(" -ctk <t>, --cache-type-k <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
-    printf(" -ctv <t>, --cache-type-v <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
+    printf(" -ub, --ubatch-size <n> (default: %s)\n", join(cmd_params_defaults.n_ubatch, ",").c_str());
+    printf(" -ctk, --cache-type-k <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_k, ggml_type_name), ",").c_str());
+    printf(" -ctv, --cache-type-v <t> (default: %s)\n", join(transform_to_str(cmd_params_defaults.type_v, ggml_type_name), ",").c_str());
     printf(" -t, --threads <n> (default: %s)\n", join(cmd_params_defaults.n_threads, ",").c_str());
     printf(" -ngl, --n-gpu-layers <n> (default: %s)\n", join(cmd_params_defaults.n_gpu_layers, ",").c_str());
     printf(" -sm, --split-mode <none|layer|row> (default: %s)\n", join(transform_to_str(cmd_params_defaults.split_mode, split_mode_str), ",").c_str());
@@ -304,6 +313,17 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
             }
             auto p = split<int>(argv[i], split_delim);
             params.n_gen.insert(params.n_gen.end(), p.begin(), p.end());
+        } else if (arg == "-pg") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+            auto p = split<std::string>(argv[i], ',');
+            if (p.size() != 2) {
+                invalid_param = true;
+                break;
+            }
+            params.n_pg.push_back({std::stoi(p[0]), std::stoi(p[1])});
         } else if (arg == "-b" || arg == "--batch-size") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -493,6 +513,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
     if (params.model.empty()) { params.model = cmd_params_defaults.model; }
     if (params.n_prompt.empty()) { params.n_prompt = cmd_params_defaults.n_prompt; }
     if (params.n_gen.empty()) { params.n_gen = cmd_params_defaults.n_gen; }
+    if (params.n_pg.empty()) { params.n_pg = cmd_params_defaults.n_pg; }
     if (params.n_batch.empty()) { params.n_batch = cmd_params_defaults.n_batch; }
     if (params.n_ubatch.empty()) { params.n_ubatch = cmd_params_defaults.n_ubatch; }
     if (params.type_k.empty()) { params.type_k = cmd_params_defaults.type_k; }
@@ -632,6 +653,31 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
         };
         instances.push_back(instance);
     }

+    for (const auto & n_pg : params.n_pg) {
+        if (n_pg.first == 0 && n_pg.second == 0) {
+            continue;
+        }
+        cmd_params_instance instance = {
+            /* .model = */ m,
+            /* .n_prompt = */ n_pg.first,
+            /* .n_gen = */ n_pg.second,
+            /* .n_batch = */ nb,
+            /* .n_ubatch = */ nub,
+            /* .type_k = */ tk,
+            /* .type_v = */ tv,
+            /* .n_threads = */ nt,
+            /* .n_gpu_layers = */ nl,
+            /* .split_mode = */ sm,
+            /* .main_gpu = */ mg,
+            /* .no_kv_offload= */ nkvo,
+            /* .flash_attn = */ fa,
+            /* .tensor_split = */ ts,
+            /* .use_mmap = */ mmp,
+            /* .embeddings = */ embd,
+        };
+        instances.push_back(instance);
+    }
 }

 return instances;
@@ -965,6 +1011,9 @@ struct markdown_printer : public printer {
         if (field == "n_gpu_layers") {
             return 3;
         }
+        if (field == "test") {
+            return 13;
+        }

         int width = std::max((int)field.length(), 10);

@@ -1091,12 +1140,11 @@ struct markdown_printer : public printer {
             value = test::get_backend();
         } else if (field == "test") {
             if (t.n_prompt > 0 && t.n_gen == 0) {
-                snprintf(buf, sizeof(buf), "pp %d", t.n_prompt);
+                snprintf(buf, sizeof(buf), "pp%d", t.n_prompt);
             } else if (t.n_gen > 0 && t.n_prompt == 0) {
-                snprintf(buf, sizeof(buf), "tg %d", t.n_gen);
+                snprintf(buf, sizeof(buf), "tg%d", t.n_gen);
             } else {
-                assert(false);
-                exit(1);
+                snprintf(buf, sizeof(buf), "pp%d+tg%d", t.n_prompt, t.n_gen);
             }
             value = buf;
         } else if (field == "t/s") {
@ -1297,6 +1345,7 @@ int main(int argc, char ** argv) {
|
||||||
llama_kv_cache_clear(ctx);
|
llama_kv_cache_clear(ctx);
|
||||||
|
|
||||||
uint64_t t_start = get_time_ns();
|
uint64_t t_start = get_time_ns();
|
||||||
|
|
||||||
if (t.n_prompt > 0) {
|
if (t.n_prompt > 0) {
|
||||||
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
|
test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
|
||||||
}
|
}
|
||||||
|
|
|
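With combined prompt+generation runs now reported as a single row, the `test` column label is built from both token counts. A minimal sketch of that labeling rule; the helper name and buffer size are illustrative, not part of the patch:

```cpp
#include <cstdio>
#include <string>

// Hypothetical helper mirroring the snprintf logic above:
// "pp512" for prompt-only, "tg128" for generation-only, "pp512+tg128" for a combined run.
static std::string test_label(int n_prompt, int n_gen) {
    char buf[64];
    if (n_prompt > 0 && n_gen == 0) {
        snprintf(buf, sizeof(buf), "pp%d", n_prompt);
    } else if (n_gen > 0 && n_prompt == 0) {
        snprintf(buf, sizeof(buf), "tg%d", n_gen);
    } else {
        snprintf(buf, sizeof(buf), "pp%d+tg%d", n_prompt, n_gen);
    }
    return buf;
}
```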
@@ -104,6 +104,7 @@ static std::string format(const char * fmt, ...) {
 #define TN_POS_EMBD        "%s.position_embd.weight"
 #define TN_CLASS_EMBD      "v.class_embd"
 #define TN_PATCH_EMBD      "v.patch_embd.weight"
+#define TN_PATCH_BIAS      "v.patch_embd.bias"
 #define TN_ATTN_K          "%s.blk.%d.attn_k.%s"
 #define TN_ATTN_Q          "%s.blk.%d.attn_q.%s"
 #define TN_ATTN_V          "%s.blk.%d.attn_v.%s"
@@ -425,6 +426,7 @@ struct clip_vision_model {
     // embeddings
     struct ggml_tensor * class_embedding;
     struct ggml_tensor * patch_embeddings;
+    struct ggml_tensor * patch_bias;
     struct ggml_tensor * position_embeddings;

     struct ggml_tensor * pre_ln_w;
@@ -501,6 +503,11 @@ struct clip_ctx {
     bool use_gelu = false;
     int32_t ftype = 1;

+    bool has_class_embedding = true;
+    bool has_pre_norm = true;
+    bool has_post_norm = false;
+    bool has_patch_bias = false;
+
     struct gguf_context * ctx_gguf;
     struct ggml_context * ctx_data;

@@ -526,7 +533,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
     const int patch_size           = hparams.patch_size;
     const int num_patches          = ((image_size / patch_size) * (image_size / patch_size));
     const int num_patches_per_side = image_size / patch_size; GGML_UNUSED(num_patches_per_side);
-    const int num_positions        = num_patches + 1;
+    const int num_positions        = num_patches + (ctx->has_class_embedding ? 1 : 0);
     const int hidden_size          = hparams.hidden_size;
     const int n_head               = hparams.n_head;
     const int d_head               = hidden_size / n_head;
@@ -557,16 +564,23 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
     inp = ggml_reshape_3d(ctx0, inp, num_patches, hidden_size, batch_size);
     inp = ggml_cont(ctx0, ggml_permute(ctx0, inp, 1, 0, 2, 3));

+    if (ctx->has_patch_bias) {
+        // inp = ggml_add(ctx0, inp, ggml_repeat(ctx0, model.patch_bias, inp));
+        inp = ggml_add(ctx0, inp, model.patch_bias);
+    }
+
     // concat class_embeddings and patch_embeddings
-    struct ggml_tensor * embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
+    struct ggml_tensor * embeddings = inp;
+    if (ctx->has_class_embedding) {
+        embeddings = ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, hidden_size, num_positions, batch_size);
         ggml_set_name(embeddings, "embeddings");
         ggml_set_input(embeddings);

         embeddings = ggml_acc(ctx0, embeddings, model.class_embedding,
                 embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], 0);

         embeddings = ggml_acc(ctx0, embeddings, inp,
                 embeddings->nb[1], embeddings->nb[2], embeddings->nb[3], model.class_embedding->nb[1]);
+    }

     struct ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_positions);
     ggml_set_name(positions, "positions");
@@ -576,7 +590,7 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
         ggml_add(ctx0, embeddings, ggml_get_rows(ctx0, model.position_embeddings, positions));

     // pre-layernorm
-    {
+    if (ctx->has_pre_norm) {
         embeddings = ggml_norm(ctx0, embeddings, eps);
         ggml_set_name(embeddings, "pre_ln");

@@ -664,6 +678,14 @@ static ggml_cgraph * clip_image_build_graph(clip_ctx * ctx, const clip_image_f32
         embeddings = cur;
     }

+    // post-layernorm
+    if (ctx->has_post_norm) {
+        embeddings = ggml_norm(ctx0, embeddings, eps);
+        ggml_set_name(embeddings, "post_ln");
+
+        embeddings = ggml_add(ctx0, ggml_mul(ctx0, embeddings, model.post_ln_w), model.post_ln_b);
+    }
+
     // llava projector
     {
         embeddings = ggml_reshape_2d(ctx0, embeddings, embeddings->ne[0], embeddings->ne[1]);
@@ -1149,11 +1171,38 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
     }

     try {
-        vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
         vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
-        vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
+        new_clip->has_class_embedding = true;
+    } catch (const std::exception& e) {
+        new_clip->has_class_embedding = false;
+    }
+
+    try {
+        vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
+        vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
+        new_clip->has_pre_norm = true;
+    } catch (std::exception & e) {
+        new_clip->has_pre_norm = false;
+    }
+
+    try {
+        vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
+        vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
+        new_clip->has_post_norm = true;
+    } catch (std::exception & e) {
+        new_clip->has_post_norm = false;
+    }
+
+    try {
+        vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
+        new_clip->has_patch_bias = true;
+    } catch (std::exception & e) {
+        new_clip->has_patch_bias = false;
+    }
+
+    try {
+        vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
+        vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
     } catch(const std::exception& e) {
         LOG_TEE("%s: failed to load vision model tensors\n", __func__);
     }
@@ -1797,7 +1846,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     const int image_size    = hparams.image_size;
     const int patch_size    = hparams.patch_size;
     const int num_patches   = ((image_size / patch_size) * (image_size / patch_size));
-    const int num_positions = num_patches + 1;
+    const int num_positions = num_patches + (ctx->has_class_embedding ? 1 : 0);

     {
         struct ggml_tensor * inp_raw = ggml_graph_get_tensor(gf, "inp_raw");
@@ -1825,6 +1874,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
     }

     {
+        if (ctx->has_class_embedding) {
         struct ggml_tensor * embeddings = ggml_graph_get_tensor(gf, "embeddings");

         void* zero_mem = malloc(ggml_nbytes(embeddings));
@@ -1832,6 +1882,7 @@ bool clip_image_batch_encode(clip_ctx * ctx, const int n_threads, const clip_ima
         ggml_backend_tensor_set(embeddings, zero_mem, 0, ggml_nbytes(embeddings));
         free(zero_mem);
     }
+    }

     {
         struct ggml_tensor * positions = ggml_graph_get_tensor(gf, "positions");
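For vision encoders that lack a class token, the position count now follows directly from the patch grid. A quick worked example of the arithmetic above; the concrete sizes are illustrative, not taken from a specific checkpoint:

```cpp
// Illustrative values only: a 224x224 image with 14x14 patches.
const int image_size = 224, patch_size = 14;
const int num_patches = (image_size / patch_size) * (image_size / patch_size); // 16 * 16 = 256
const int num_positions_with_class    = num_patches + 1; // 257 when a class embedding exists
const int num_positions_without_class = num_patches;     // 256 when the model has no class token
```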
@@ -189,6 +189,11 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
     LOG_TEE("\n");

     struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
+    if (!ctx_sampling) {
+        fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
+        exit(1);
+    }
+
     std::string response = "";
     for (int i = 0; i < max_tgt_len; i++) {
         const char * tmp = sample(ctx_sampling, ctx_llava->ctx_llama, &n_past);
@@ -523,6 +523,10 @@ int main(int argc, char ** argv) {
    }

    struct llama_sampling_context * ctx_sampling = llama_sampling_init(sparams);
+   if (!ctx_sampling) {
+       fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
+       exit(1);
+   }

    while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
        // predict
@@ -879,7 +883,7 @@ int main(int argc, char ** argv) {
            }

            const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
-           const auto line_inp = ::llama_tokenize(ctx, buffer, false, false);
+           const auto line_inp = ::llama_tokenize(ctx, buffer, false, params.interactive_specials);
            const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);

            LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
@@ -651,9 +651,6 @@ struct server_context {
    std::string              system_prompt;
    std::vector<llama_token> system_tokens;

-   std::string name_user;      // this should be the antiprompt
-   std::string name_assistant;
-
    // slots / clients
    std::vector<server_slot> slots;
    json default_generation_settings_for_props;
@@ -673,6 +670,8 @@ struct server_context {
            llama_free_model(model);
            model = nullptr;
        }
+
+       llama_batch_free(batch);
    }

    bool load_model(const gpt_params & params_) {
@@ -1098,15 +1097,11 @@ struct server_context {
        system_need_update = false;
    }

-   void system_prompt_set(const json & sys_props) {
-       system_prompt  = sys_props.value("prompt", "");
-       name_user      = sys_props.value("anti_prompt", "");
-       name_assistant = sys_props.value("assistant_name", "");
+   bool system_prompt_set(const std::string & sys_prompt) {
+       system_prompt = sys_prompt;

        LOG_VERBOSE("system prompt process", {
            {"system_prompt", system_prompt},
-           {"name_user", name_user},
-           {"name_assistant", name_assistant},
        });

        // release all slots
@@ -1115,6 +1110,7 @@ struct server_context {
        }

        system_need_update = true;
+       return true;
    }

    bool process_token(completion_token_output & result, server_slot & slot) {
@@ -1534,7 +1530,8 @@ struct server_context {
                }

                if (task.data.contains("system_prompt")) {
-                   system_prompt_set(task.data.at("system_prompt"));
+                   std::string sys_prompt = json_value(task.data, "system_prompt", std::string());
+                   system_prompt_set(sys_prompt);

                    for (server_slot & slot : slots) {
                        slot.n_past = 0;
@@ -2270,10 +2267,10 @@ struct server_context {

        const size_t n_probs = std::min(cur_p.size, (size_t) slot.sparams.n_probs);
        if (n_probs > 0) {
-           const size_t n_considered = slot.ctx_sampling->n_considered;
+           const size_t n_valid = slot.ctx_sampling->n_valid;

            // Make sure at least n_probs top tokens are at the front of the vector:
-           if (slot.sparams.temp == 0.0f && n_probs > n_considered) {
+           if (slot.sparams.temp == 0.0f && n_probs > n_valid) {
                llama_sample_top_k(ctx, &cur_p, n_probs, 0);
            }

@@ -2289,7 +2286,7 @@ struct server_context {
            for (size_t i = 0; i < n_probs; ++i) {
                result.probs.push_back({
                    cur_p.data[i].id,
-                   i >= n_considered ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
+                   i >= n_valid      ? 0.0f : cur_p.data[i].p // Tokens filtered out due to e.g. top_k have 0 probability.
                });
            }
        }
@@ -2920,7 +2917,7 @@ int main(int argc, char ** argv) {
    server_params_parse(argc, argv, sparams, params);

    if (!sparams.system_prompt.empty()) {
-       ctx_server.system_prompt_set(json::parse(sparams.system_prompt));
+       ctx_server.system_prompt_set(sparams.system_prompt);
    }

    if (params.model_alias == "unknown") {
@@ -3409,8 +3406,7 @@ int main(int argc, char ** argv) {
    const auto handle_props = [&ctx_server](const httplib::Request & req, httplib::Response & res) {
        res.set_header("Access-Control-Allow-Origin", req.get_header_value("Origin"));
        json data = {
-           { "user_name",                   ctx_server.name_user.c_str() },
-           { "assistant_name",              ctx_server.name_assistant.c_str() },
+           { "system_prompt",               ctx_server.system_prompt.c_str() },
            { "default_generation_settings", ctx_server.default_generation_settings_for_props },
            { "total_slots",                 ctx_server.params.n_parallel }
        };
@@ -1182,9 +1182,9 @@ static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, st
 static char * fmt_size(size_t size) {
     static char buffer[128];
     if (size >= 1024*1024) {
-        sprintf(buffer, "%zuM", size/1024/1024);
+        snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024);
     } else {
-        sprintf(buffer, "%zuK", size/1024);
+        snprintf(buffer, sizeof(buffer), "%zuK", size/1024);
     }
     return buffer;
 }
@@ -4,7 +4,6 @@

 #include "ggml-cuda/common.cuh"
 #include "ggml-cuda/acc.cuh"
-#include "ggml-cuda/alibi.cuh"
 #include "ggml-cuda/arange.cuh"
 #include "ggml-cuda/argsort.cuh"
 #include "ggml-cuda/binbcast.cuh"
@@ -2205,6 +2204,9 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
        case GGML_UNARY_OP_RELU:
            ggml_cuda_op_relu(ctx, dst);
            break;
+       case GGML_UNARY_OP_SIGMOID:
+           ggml_cuda_op_sigmoid(ctx, dst);
+           break;
        case GGML_UNARY_OP_HARDSIGMOID:
            ggml_cuda_op_hardsigmoid(ctx, dst);
            break;
@@ -2277,9 +2279,6 @@ static bool ggml_cuda_compute_forward(ggml_backend_cuda_context & ctx, struct gg
        case GGML_OP_ROPE:
            ggml_cuda_op_rope(ctx, dst);
            break;
-       case GGML_OP_ALIBI:
-           ggml_cuda_op_alibi(ctx, dst);
-           break;
        case GGML_OP_IM2COL:
            ggml_cuda_op_im2col(ctx, dst);
            break;
@@ -2720,6 +2719,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
            case GGML_UNARY_OP_GELU:
            case GGML_UNARY_OP_SILU:
            case GGML_UNARY_OP_RELU:
+           case GGML_UNARY_OP_SIGMOID:
            case GGML_UNARY_OP_HARDSIGMOID:
            case GGML_UNARY_OP_HARDSWISH:
            case GGML_UNARY_OP_GELU_QUICK:
@@ -2829,7 +2829,6 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
        case GGML_OP_DIAG_MASK_INF:
        case GGML_OP_SOFT_MAX:
        case GGML_OP_ROPE:
-       case GGML_OP_ALIBI:
        case GGML_OP_IM2COL:
        case GGML_OP_POOL_2D:
        case GGML_OP_SUM_ROWS:
@@ -1,63 +0,0 @@
-#include "alibi.cuh"
-
-static __global__ void alibi_f32(const float * x, float * dst, const int ncols, const int k_rows,
-                                 const int n_heads_log2_floor, const float m0, const float m1) {
-    const int col = blockDim.x*blockIdx.x + threadIdx.x;
-
-    if (col >= ncols) {
-        return;
-    }
-
-    const int row = blockDim.y*blockIdx.y + threadIdx.y;
-    const int i = row*ncols + col;
-
-    const int k = row/k_rows;
-
-    float m_k;
-    if (k < n_heads_log2_floor) {
-        m_k = powf(m0, k + 1);
-    } else {
-        m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
-    }
-
-    dst[i] = col * m_k + x[i];
-}
-
-static void alibi_f32_cuda(const float * x, float * dst, const int ncols, const int nrows,
-                           const int k_rows, const int n_heads_log2_floor, const float m0,
-                           const float m1, cudaStream_t stream) {
-    const dim3 block_dims(CUDA_ALIBI_BLOCK_SIZE, 1, 1);
-    const int num_blocks_x = (ncols + CUDA_ALIBI_BLOCK_SIZE - 1) / (CUDA_ALIBI_BLOCK_SIZE);
-    const dim3 block_nums(num_blocks_x, nrows, 1);
-    alibi_f32<<<block_nums, block_dims, 0, stream>>>(x, dst, ncols, k_rows, n_heads_log2_floor, m0, m1);
-}
-
-void ggml_cuda_op_alibi(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    const ggml_tensor * src0 = dst->src[0];
-    const float * src0_d = (const float *)src0->data;
-    float * dst_d = (float *)dst->data;
-    cudaStream_t stream = ctx.stream();
-
-    GGML_ASSERT(src0->type == GGML_TYPE_F32);
-    GGML_ASSERT( dst->type == GGML_TYPE_F32);
-
-    const int64_t ne00 = src0->ne[0];
-    const int64_t ne01 = src0->ne[1];
-    const int64_t ne02 = src0->ne[2];
-    const int64_t nrows = ggml_nrows(src0);
-
-    //const int n_past = ((int32_t *) dst->op_params)[0];
-    const int n_head = ((int32_t *) dst->op_params)[1];
-    float max_bias;
-    memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
-
-    //GGML_ASSERT(ne01 + n_past == ne00);
-    GGML_ASSERT(n_head == ne02);
-
-    const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
-
-    const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
-    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
-
-    alibi_f32_cuda(src0_d, dst_d, ne00, nrows, ne01, n_heads_log2_floor, m0, m1, stream);
-}
@@ -1,5 +0,0 @@
-#include "common.cuh"
-
-#define CUDA_ALIBI_BLOCK_SIZE 32
-
-void ggml_cuda_op_alibi(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
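The standalone ALiBi kernel above is removed because the bias is now folded into soft-max and flash-attention as a per-head slope applied to the attention mask (see the hunks that follow). A plain C++ sketch of that shared slope computation, using the same m0/m1/n_head_log2 scheme; this is a reading aid, not code from the patch:

```cpp
#include <cmath>
#include <cstdint>

// Per-head ALiBi slope: heads below the power-of-two boundary use base m0,
// the rest use m1 with odd exponents, matching the kernel code below.
static float alibi_slope(float max_bias, uint32_t n_head, uint32_t h) {
    if (max_bias <= 0.0f) {
        return 1.0f; // no ALiBi: the mask is used unscaled
    }
    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
    const float base = h < n_head_log2 ? m0 : m1;
    const int   exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
    return powf(base, exph);
}
```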
@@ -23,6 +23,10 @@ static __global__ void flash_attn_vec_ext_f16(
        float * __restrict__ dst,
        float2 * __restrict__ dst_meta,
        const float scale,
+       const float max_bias,
+       const float m0,
+       const float m1,
+       const uint32_t n_head_log2,
        const int ne00,
        const int ne01,
        const int ne02,
@@ -58,6 +62,18 @@ static __global__ void flash_attn_vec_ext_f16(
    const int stride_KV  = nb11 / sizeof(half);
    const int stride_KV2 = nb11 / sizeof(half2);

+   half slopeh = __float2half(1.0f);
+
+   // ALiBi
+   if (max_bias > 0.0f) {
+       const int h = blockIdx.y;
+
+       const float base = h < n_head_log2 ? m0 : m1;
+       const int   exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
+
+       slopeh = __float2half(powf(base, exph));
+   }
+
    static_assert(D % (2*WARP_SIZE) == 0, "D not divisible by 2*WARP_SIZE == 64.");
    constexpr int nwarps = D / WARP_SIZE;
    const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
@@ -141,7 +157,7 @@ static __global__ void flash_attn_vec_ext_f16(
        for (int j = 0; j < ncols; ++j) {
            sum2[j] = warp_reduce_sum(sum2[j]);
            half sum = __low2half(sum2[j]) + __high2half(sum2[j]);
-           sum += mask ? maskh[j*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f);
+           sum += mask ? slopeh*maskh[j*ne11 + k_VKQ_0 + i_KQ] : __float2half(0.0f);

            if (ncols == 1) {
                kqmax_new = ggml_cuda_hmax(kqmax_new, sum);
@@ -249,6 +265,10 @@ static __global__ void flash_attn_ext_f16(
        float * __restrict__ dst,
        float2 * __restrict__ dst_meta,
        const float scale,
+       const float max_bias,
+       const float m0,
+       const float m1,
+       const uint32_t n_head_log2,
        const int ne00,
        const int ne01,
        const int ne02,
@@ -305,6 +325,20 @@ static __global__ void flash_attn_ext_f16(
    const int stride_Q  = nb01 / sizeof(float);
    const int stride_KV = nb11 / sizeof(half);

+   half  slopeh = __float2half(1.0f);
+   half2 slope2 = make_half2(1.0f, 1.0f);
+
+   // ALiBi
+   if (max_bias > 0.0f) {
+       const int h = blockIdx.y;
+
+       const float base = h < n_head_log2 ? m0 : m1;
+       const int   exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
+
+       slopeh = __float2half(powf(base, exph));
+       slope2 = make_half2(slopeh, slopeh);
+   }
+
    frag_b Q_b[D/16][ncols/frag_n];

    // A single buffer for temporarily holding tiles of KQ and VKQ parts:
@@ -421,7 +455,7 @@ static __global__ void flash_attn_ext_f16(
            for (int k0 = 0; k0 < FATTN_KQ_STRIDE; k0 += WARP_SIZE) {
                const int k = k0 + threadIdx.x;

-               KQ_f_tmp[k0/WARP_SIZE] += mask ? __half2float(maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f;
+               KQ_f_tmp[k0/WARP_SIZE] += mask ? __half2float(slopeh*maskh[j*(nb31/sizeof(half)) + k_VKQ_0 + k]) : 0.0f;
                KQ_max_new = max(KQ_max_new, KQ_f_tmp[k0/WARP_SIZE]);
            }
            KQ_max_new = warp_reduce_max(KQ_max_new);
@@ -464,7 +498,7 @@ static __global__ void flash_attn_ext_f16(
            for (int k0 = 0; k0 < FATTN_KQ_STRIDE/2; k0 += WARP_SIZE) {
                const int k = k0 + threadIdx.x;

-               KQ2_tmp[k0/WARP_SIZE] += mask ? mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f);
+               KQ2_tmp[k0/WARP_SIZE] += mask ? slope2*mask2[(j*ne11 + k_VKQ_0)/2 + k] : make_half2(0.0f, 0.0f);
                KQ_max_new = ggml_cuda_hmax2(KQ_max_new, KQ2_tmp[k0/WARP_SIZE]);
            }
            KQ_max_new = __half2half2(warp_reduce_max(ggml_cuda_hmax(__low2half(KQ_max_new), __high2half(KQ_max_new))));
@@ -710,8 +744,17 @@ template <int D, int cols_per_block, int parallel_blocks> void launch_fattn_vec_
    const dim3 blocks_num(parallel_blocks*((Q->ne[1] + cols_per_block - 1) / cols_per_block), Q->ne[2], Q->ne[3]);
    const int  shmem = 0;

-   float scale;
-   memcpy(&scale, KQV->op_params, sizeof(float));
+   float scale    = 1.0f;
+   float max_bias = 0.0f;
+
+   memcpy(&scale,    (float *) KQV->op_params + 0, sizeof(float));
+   memcpy(&max_bias, (float *) KQV->op_params + 1, sizeof(float));
+
+   const uint32_t n_head      = Q->ne[2];
+   const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
+
+   const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
+   const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    flash_attn_vec_ext_f16<D, cols_per_block, parallel_blocks>
        <<<blocks_num, block_dim, shmem, main_stream>>> (
@@ -720,7 +763,7 @@ template <int D, int cols_per_block, int parallel_blocks> void launch_fattn_vec_
            (const char *) V->data,
            mask ? ((const char *) mask->data) : nullptr,
            parallel_blocks == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr,
-           scale,
+           scale, max_bias, m0, m1, n_head_log2,
            Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3],
            K->ne[0], K->ne[1], K->ne[2], K->ne[3],
            mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0,
@@ -761,8 +804,17 @@ template <int D, int cols_per_block, int nwarps, int parallel_blocks, typename K
    const dim3 blocks_num(parallel_blocks*(Q->ne[1] + cols_per_block - 1) / cols_per_block, Q->ne[2], Q->ne[3]);
    const int  shmem = 0;

-   float scale;
-   memcpy(&scale, KQV->op_params, sizeof(float));
+   float scale    = 1.0f;
+   float max_bias = 0.0f;
+
+   memcpy(&scale,    (float *) KQV->op_params + 0, sizeof(float));
+   memcpy(&max_bias, (float *) KQV->op_params + 1, sizeof(float));
+
+   const uint32_t n_head      = Q->ne[2];
+   const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
+
+   const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
+   const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

    flash_attn_ext_f16<D, cols_per_block, nwarps, get_VKQ_stride(D, nwarps, frag_m), parallel_blocks, KQ_acc_t>
        <<<blocks_num, block_dim, shmem, main_stream>>> (
@@ -771,7 +823,7 @@ template <int D, int cols_per_block, int nwarps, int parallel_blocks, typename K
            (const char *) V->data,
            mask ? ((const char *) mask->data) : nullptr,
            (parallel_blocks) == 1 ? (float *) KQV->data : dst_tmp.ptr, dst_tmp_meta.ptr,
-           scale,
+           scale, max_bias, m0, m1, n_head_log2,
            Q->ne[0], Q->ne[1], Q->ne[2], Q->ne[3],
            K->ne[0], K->ne[1], K->ne[2], K->ne[3],
            mask ? mask->ne[1] : 0, mask ? mask->nb[1] : 0,
@@ -837,7 +889,7 @@ void ggml_cuda_flash_attn_ext(ggml_backend_cuda_context & ctx, ggml_tensor * dst
    const int cc  = ggml_cuda_info().devices[ggml_cuda_get_device()].cc;
    const int nsm = ggml_cuda_info().devices[ggml_cuda_get_device()].nsm;

-   const int32_t precision = KQV->op_params[1];
+   const int32_t precision = KQV->op_params[2];

    if (!fp16_mma_available(cc)) {
        GGML_ASSERT(precision == GGML_PREC_DEFAULT);
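In effect, the extra kernel arguments let flash attention apply ALiBi without a separate bias tensor: each head's logits are the scaled dot products plus the slope-scaled mask. Schematically (notation mine, not from the patch):

```latex
\mathrm{KQ}^{(h)}_{j,k} \;=\; \mathrm{scale}\cdot \langle q_j, k_k\rangle \;+\; \mathrm{slope}(h)\cdot \mathrm{mask}_{j,k},
\qquad \mathrm{slope}(h) \;=\; \mathrm{base}(h)^{\,\mathrm{exph}(h)}
```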
@@ -11,7 +11,7 @@ __device__ float __forceinline__ t2f32<half>(half val) {
 }

 template <bool vals_smem, int ncols_template, int block_size_template, typename T>
-static __global__ void soft_max_f32(const float * x, const T * mask, const T * pos, float * dst, const int ncols_par, const int nrows_y, const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2) {
+static __global__ void soft_max_f32(const float * x, const T * mask, float * dst, const int ncols_par, const int nrows_y, const float scale, const float max_bias, const float m0, const float m1, uint32_t n_head_log2) {
     const int ncols = ncols_template == 0 ? ncols_par : ncols_template;

     const int tid = threadIdx.x;
@@ -23,16 +23,16 @@ static __global__ void soft_max_f32(const float * x, const T * mask, const T * p
     const int warp_id = threadIdx.x / WARP_SIZE;
     const int lane_id = threadIdx.x % WARP_SIZE;

-    float slope = 0.0f;
+    float slope = 1.0f;

     // ALiBi
     if (max_bias > 0.0f) {
         const int h = rowx/nrows_y; // head index

         const float base = h < n_head_log2 ? m0 : m1;
-        const int   exp  = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
+        const int   exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;

-        slope = powf(base, exp);
+        slope = powf(base, exph);
     }

     extern __shared__ float data_soft_max_f32[];
@@ -53,7 +53,7 @@ static __global__ void soft_max_f32(const float * x, const T * mask, const T * p
         const int64_t ix = (int64_t)rowx*ncols + col;
         const int64_t iy = (int64_t)rowy*ncols + col;

-        const float val = x[ix]*scale + (mask ? t2f32(mask[iy]) : 0.0f) + (pos ? slope*t2f32(pos[col]) : 0.0f);
+        const float val = x[ix]*scale + (mask ? slope*t2f32(mask[iy]) : 0.0f);

         vals[col] = val;
         max_val = max(max_val, val);
@@ -125,7 +125,7 @@ static __global__ void soft_max_f32(const float * x, const T * mask, const T * p
 }

 template<typename T>
-static void soft_max_f32_cuda(const float * x, const T * mask, const T * pos, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, const float max_bias, cudaStream_t stream) {
+static void soft_max_f32_cuda(const float * x, const T * mask, float * dst, const int ncols_x, const int nrows_x, const int nrows_y, const float scale, const float max_bias, cudaStream_t stream) {
     int nth = WARP_SIZE;
     while (nth < ncols_x && nth < CUDA_SOFT_MAX_BLOCK_SIZE) nth *= 2;
     const dim3 block_dims(nth,     1, 1);
@@ -133,8 +133,8 @@ static void soft_max_f32_cuda(const float * x, const T * mask, const T * pos, fl
     const size_t shmem = (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE)*sizeof(float);
     static_assert(CUDA_SOFT_MAX_BLOCK_SIZE == 1024, "These values need to be adjusted.");

-    const uint32_t n_head_kv   = nrows_x/nrows_y;
-    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
+    const uint32_t n_head      = nrows_x/nrows_y;
+    const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));

     const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
     const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
@@ -142,43 +142,42 @@ static void soft_max_f32_cuda(const float * x, const T * mask, const T * pos, fl
     if (shmem < ggml_cuda_info().devices[ggml_cuda_get_device()].smpb) {
         switch (ncols_x) {
             case 32:
-                soft_max_f32<true, 32, 32><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 32, 32><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 64:
-                soft_max_f32<true, 64, 64><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 64, 64><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 128:
-                soft_max_f32<true, 128, 128><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 128, 128><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 256:
-                soft_max_f32<true, 256, 256><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 256, 256><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 512:
-                soft_max_f32<true, 512, 512><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 512, 512><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 1024:
-                soft_max_f32<true, 1024, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 1024, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 2048:
-                soft_max_f32<true, 2048, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 2048, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             case 4096:
-                soft_max_f32<true, 4096, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 4096, 1024><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
             default:
-                soft_max_f32<true, 0, 0><<<block_nums, block_dims, shmem, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+                soft_max_f32<true, 0, 0><<<block_nums, block_dims, shmem, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
                 break;
         }
     } else {
         const size_t shmem_low = WARP_SIZE*sizeof(float);
-        soft_max_f32<false, 0, 0><<<block_nums, block_dims, shmem_low, stream>>>(x, mask, pos, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
+        soft_max_f32<false, 0, 0><<<block_nums, block_dims, shmem_low, stream>>>(x, mask, dst, ncols_x, nrows_y, scale, max_bias, m0, m1, n_head_log2);
     }
 }

 void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const ggml_tensor * src1 = dst->src[1];
-    const ggml_tensor * src2 = dst->src[2];

     const float * src0_d = (const float *)src0->data;
     const void  * src1_d = src1 ? (const void *)src1->data : nullptr;
@@ -190,7 +189,6 @@ void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     GGML_ASSERT( dst->type == GGML_TYPE_F32);

     GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional
-    GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F16 || src2->type == GGML_TYPE_F32); // src2 contains positions and it is optional

     const int64_t ne00    = src0->ne[0];
     const int64_t nrows_x = ggml_nrows(src0);
@@ -202,26 +200,15 @@ void ggml_cuda_op_soft_max(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     memcpy(&scale,    (float *) dst->op_params + 0, sizeof(float));
     memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));

-    // positions tensor
-    void * src2_d = nullptr;
-
-    const bool use_src2 = src2 != nullptr;
-
-    if (use_src2) {
-        src2_d = (void *)src2->data;
-    }
-
-    const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16) || (src2 && src2->type == GGML_TYPE_F16);
+    const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);

     if (use_f16) {
         const half * src1_dd = (const half *)src1_d;
-        const half * src2_dd = (const half *)src2_d;

-        soft_max_f32_cuda(src0_d, src1_dd, src2_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
+        soft_max_f32_cuda(src0_d, src1_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
     } else {
         const float * src1_dd = (const float *)src1_d;
-        const float * src2_dd = (const float *)src2_d;

-        soft_max_f32_cuda(src0_d, src1_dd, src2_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
+        soft_max_f32_cuda(src0_d, src1_dd, dst_d, ne00, nrows_x, nrows_y, scale, max_bias, stream);
     }
 }
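With the separate positions tensor removed, the soft-max input is simply the scaled logits plus the slope-scaled mask. A host-side CPU reference of that per-row computation, handy when checking the CUDA path; this is a sketch I wrote for illustration, not code from the patch:

```cpp
#include <algorithm>
#include <cmath>
#include <vector>

// CPU reference of the fused soft-max row above: the former "pos" term is folded
// into the mask, scaled by the per-head ALiBi slope (slope == 1 when max_bias == 0).
static void soft_max_row_ref(const float * x, const float * mask, float * dst,
                             int ncols, float scale, float slope) {
    std::vector<float> vals(ncols);
    float max_val = -INFINITY;
    for (int c = 0; c < ncols; ++c) {
        vals[c] = x[c]*scale + (mask ? slope*mask[c] : 0.0f);
        max_val = std::max(max_val, vals[c]);
    }
    float sum = 0.0f;
    for (int c = 0; c < ncols; ++c) {
        vals[c] = expf(vals[c] - max_val);
        sum += vals[c];
    }
    for (int c = 0; c < ncols; ++c) {
        dst[c] = vals[c] / sum;
    }
}
```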
@@ -48,6 +48,15 @@ static __global__ void relu_f32(const float * x, float * dst, const int k) {
     dst[i] = fmaxf(x[i], 0);
 }

+static __global__ void sigmoid_f32(const float * x, float * dst, const int k) {
+    const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+    if (i >= k) {
+        return;
+    }
+    dst[i] = 1.0f / (1.0f + expf(-x[i]));
+}
+
 static __global__ void hardsigmoid_f32(const float * x, float * dst, const int k) {
     const int i = blockDim.x*blockIdx.x + threadIdx.x;

@@ -108,6 +117,11 @@ static void relu_f32_cuda(const float * x, float * dst, const int k, cudaStream_
     relu_f32<<<num_blocks, CUDA_RELU_BLOCK_SIZE, 0, stream>>>(x, dst, k);
 }

+static void sigmoid_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
+    const int num_blocks = (k + CUDA_SIGMOID_BLOCK_SIZE - 1) / CUDA_SIGMOID_BLOCK_SIZE;
+    sigmoid_f32<<<num_blocks, CUDA_SIGMOID_BLOCK_SIZE, 0, stream>>>(x, dst, k);
+}
+
 static void hardsigmoid_f32_cuda(const float * x, float * dst, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_HARDSIGMOID_BLOCK_SIZE - 1) / CUDA_HARDSIGMOID_BLOCK_SIZE;
     hardsigmoid_f32<<<num_blocks, CUDA_HARDSIGMOID_BLOCK_SIZE, 0, stream>>>(x, dst, k);
@@ -188,6 +202,18 @@ void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     relu_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
 }

+void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+    const ggml_tensor * src0 = dst->src[0];
+    const float * src0_d = (const float *)src0->data;
+    float * dst_d = (float *)dst->data;
+    cudaStream_t stream = ctx.stream();
+
+    GGML_ASSERT(src0->type == GGML_TYPE_F32);
+    GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+    sigmoid_f32_cuda(src0_d, dst_d, ggml_nelements(src0), stream);
+}
+
 void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
     const ggml_tensor * src0 = dst->src[0];
     const float * src0_d = (const float *)src0->data;
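For completeness, the scalar function the new kernel computes, as a tiny host-side reference for spot-checking the device output; an illustrative helper, not part of the patch:

```cpp
#include <cmath>

// Reference sigmoid: dst[i] = 1 / (1 + e^(-x[i])), matching sigmoid_f32 above.
static float sigmoid_ref(float x) {
    return 1.0f / (1.0f + expf(-x));
}
```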
@@ -4,6 +4,7 @@
 #define CUDA_SILU_BLOCK_SIZE 256
 #define CUDA_TANH_BLOCK_SIZE 256
 #define CUDA_RELU_BLOCK_SIZE 256
+#define CUDA_SIGMOID_BLOCK_SIZE 256
 #define CUDA_HARDSIGMOID_BLOCK_SIZE 256
 #define CUDA_HARDSWISH_BLOCK_SIZE 256
 #define CUDA_SQR_BLOCK_SIZE 256
@@ -18,6 +19,8 @@ void ggml_cuda_op_tanh(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

 void ggml_cuda_op_relu(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

+void ggml_cuda_op_sigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
+
 void ggml_cuda_op_hardsigmoid(ggml_backend_cuda_context & ctx, ggml_tensor * dst);

 void ggml_cuda_op_hardswish(ggml_backend_cuda_context & ctx, ggml_tensor * dst);
@@ -1559,12 +1559,18 @@ static void ggml_vk_graph_compute(struct ggml_kompute_context * ctx, struct ggml
            case GGML_OP_SOFT_MAX:
                {
                    float scale;
-                   memcpy(&scale, dst->op_params, sizeof(float));
+                   float max_bias;

-#pragma message("TODO: add ggml_vk_soft_max() F16/F32 src1 and src2 support")
+                   memcpy(&scale,    (float *)dst->op_params + 0, sizeof(float));
+                   memcpy(&max_bias, (float *)dst->op_params + 1, sizeof(float));
+
+#pragma message("TODO: add ggml_vk_soft_max() F16 src1 support")
 #pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021")
                    GGML_ASSERT(!src1 || src1t == GGML_TYPE_F32);
-                   GGML_ASSERT(src2 == nullptr);
+
+#pragma message("TODO: add ALiBi support")
+#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/7192")
+                   GGML_ASSERT(max_bias == 0.0f);

                    ggml_vk_soft_max(seq, id_src0, id_src1, id_dst, off_src0, off_src1, off_dst, ne00, ne01, ne02, ne03, scale);
                } break;
140 ggml-metal.m
@@ -40,6 +40,7 @@ enum ggml_metal_kernel_type {
    GGML_METAL_KERNEL_TYPE_CLAMP,
    GGML_METAL_KERNEL_TYPE_TANH,
    GGML_METAL_KERNEL_TYPE_RELU,
+   GGML_METAL_KERNEL_TYPE_SIGMOID,
    GGML_METAL_KERNEL_TYPE_GELU,
    GGML_METAL_KERNEL_TYPE_GELU_4,
    GGML_METAL_KERNEL_TYPE_GELU_QUICK,
@@ -169,7 +170,6 @@ enum ggml_metal_kernel_type {
    GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F32,
    GGML_METAL_KERNEL_TYPE_ROPE_F16,
-   GGML_METAL_KERNEL_TYPE_ALIBI_F32,
    GGML_METAL_KERNEL_TYPE_IM2COL_F16,
    GGML_METAL_KERNEL_TYPE_IM2COL_F32,
    GGML_METAL_KERNEL_TYPE_UPSCALE_F32,
@@ -494,6 +494,7 @@ static struct ggml_metal_context * ggml_metal_init(int n_cb) {
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CLAMP, clamp, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_TANH, tanh, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_RELU, relu, true);
+       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_SIGMOID, sigmoid, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU, gelu, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_4, gelu_4, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GELU_QUICK, gelu_quick, true);
@@ -623,7 +624,6 @@ static struct ggml_metal_context * ggml_metal_init(int n_cb) {
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F32, rope_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ROPE_F16, rope_f16, true);
-       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ALIBI_F32, alibi_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F16, im2col_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_IM2COL_F32, im2col_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_UPSCALE_F32, upscale_f32, true);
@@ -633,14 +633,14 @@ static struct ggml_metal_context * ggml_metal_init(int n_cb) {
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_ASC, argsort_f32_i32_asc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_ARGSORT_F32_I32_DESC, argsort_f32_i32_desc, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_LEAKY_RELU_F32, leaky_relu_f32, true);
-       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64, flash_attn_ext_f16_h64, true);
-       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H80, flash_attn_ext_f16_h80, true);
-       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H96, flash_attn_ext_f16_h96, true);
-       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H112, flash_attn_ext_f16_h112, true);
-       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H128, flash_attn_ext_f16_h128, true);
-       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256, flash_attn_ext_f16_h256, true);
-       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128, flash_attn_ext_vec_f16_h128, true);
-       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H256, flash_attn_ext_vec_f16_h256, true);
+       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H64, flash_attn_ext_f16_h64, ctx->support_simdgroup_mm);
+       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H80, flash_attn_ext_f16_h80, ctx->support_simdgroup_mm);
+       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H96, flash_attn_ext_f16_h96, ctx->support_simdgroup_mm);
+       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H112, flash_attn_ext_f16_h112, ctx->support_simdgroup_mm);
+       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H128, flash_attn_ext_f16_h128, ctx->support_simdgroup_mm);
+       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_F16_H256, flash_attn_ext_f16_h256, ctx->support_simdgroup_mm);
+       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H128, flash_attn_ext_vec_f16_h128, ctx->support_simdgroup_reduction);
+       GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_FLASH_ATTN_EXT_VEC_F16_H256, flash_attn_ext_vec_f16_h256, ctx->support_simdgroup_reduction);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F16, cpy_f32_f16, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_F32, cpy_f32_f32, true);
        GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_CPY_F32_Q8_0, cpy_f32_q8_0, true);
@@ -732,6 +732,7 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
            switch (ggml_get_unary_op(op)) {
                case GGML_UNARY_OP_TANH:
                case GGML_UNARY_OP_RELU:
+               case GGML_UNARY_OP_SIGMOID:
                case GGML_UNARY_OP_GELU:
                case GGML_UNARY_OP_GELU_QUICK:
                case GGML_UNARY_OP_SILU:
@@ -759,7 +760,6 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
        case GGML_OP_GROUP_NORM:
            return ctx->support_simdgroup_reduction;
        case GGML_OP_NORM:
-       case GGML_OP_ALIBI:
        case GGML_OP_ROPE:
        case GGML_OP_IM2COL:
            return true;
@@ -772,8 +772,9 @@ static bool ggml_metal_supports_op(const struct ggml_metal_context * ctx, const
        case GGML_OP_TIMESTEP_EMBEDDING:
        case GGML_OP_ARGSORT:
        case GGML_OP_LEAKY_RELU:
-       case GGML_OP_FLASH_ATTN_EXT:
            return true;
+       case GGML_OP_FLASH_ATTN_EXT:
+           return ctx->support_simdgroup_mm; // TODO: over-restricted for vec-kernels
        case GGML_OP_MUL_MAT:
        case GGML_OP_MUL_MAT_ID:
            return ctx->support_simdgroup_reduction &&
@@ -1239,6 +1240,18 @@ static enum ggml_status ggml_metal_graph_compute(
@ -1239,6 +1240,18 @@ static enum ggml_status ggml_metal_graph_compute(
|
||||||
|
|
||||||
const int64_t n = ggml_nelements(dst);
|
const int64_t n = ggml_nelements(dst);
|
||||||
|
|
||||||
|
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
|
||||||
|
} break;
|
||||||
|
case GGML_UNARY_OP_SIGMOID:
|
||||||
|
{
|
||||||
|
id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_SIGMOID].pipeline;
|
||||||
|
|
||||||
|
[encoder setComputePipelineState:pipeline];
|
||||||
|
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
|
||||||
|
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
|
||||||
|
|
||||||
|
const int64_t n = ggml_nelements(dst);
|
||||||
|
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
|
[encoder dispatchThreadgroups:MTLSizeMake(n, 1, 1) threadsPerThreadgroup:MTLSizeMake(1, 1, 1)];
|
||||||
} break;
|
} break;
|
||||||
case GGML_UNARY_OP_GELU:
|
case GGML_UNARY_OP_GELU:
|
||||||
|
@ -1357,13 +1370,12 @@ static enum ggml_status ggml_metal_graph_compute(
|
||||||
case GGML_OP_SOFT_MAX:
|
case GGML_OP_SOFT_MAX:
|
||||||
{
|
{
|
||||||
GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32);
|
GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32);
|
||||||
GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F16 || src2->type == GGML_TYPE_F32);
|
|
||||||
|
|
||||||
int nth = 32; // SIMD width
|
int nth = 32; // SIMD width
|
||||||
|
|
||||||
id<MTLComputePipelineState> pipeline = nil;
|
id<MTLComputePipelineState> pipeline = nil;
|
||||||
|
|
||||||
const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16) || (src2 && src2->type == GGML_TYPE_F16);
|
const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);
|
||||||
|
|
||||||
if (ne00%4 == 0) {
|
if (ne00%4 == 0) {
|
||||||
while (nth < ne00/4 && nth < 256) {
|
while (nth < ne00/4 && nth < 256) {
|
||||||
|
@ -1394,8 +1406,8 @@ static enum ggml_status ggml_metal_graph_compute(
|
||||||
const int64_t nrows_x = ggml_nrows(src0);
|
const int64_t nrows_x = ggml_nrows(src0);
|
||||||
const int64_t nrows_y = src0->ne[1];
|
const int64_t nrows_y = src0->ne[1];
|
||||||
|
|
||||||
const uint32_t n_head_kv = nrows_x/nrows_y;
|
const uint32_t n_head = nrows_x/nrows_y;
|
||||||
const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head_kv));
|
const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
|
||||||
|
|
||||||
const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
|
const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
|
||||||
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
|
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
|
||||||
|
@ -1407,20 +1419,15 @@ static enum ggml_status ggml_metal_graph_compute(
|
||||||
} else {
|
} else {
|
||||||
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
|
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:1];
|
||||||
}
|
}
|
||||||
if (id_src2) {
|
[encoder setBuffer:id_dst offset:offs_dst atIndex:2];
|
||||||
[encoder setBuffer:id_src2 offset:offs_src2 atIndex:2];
|
[encoder setBytes:&ne00 length:sizeof(ne00) atIndex:3];
|
||||||
} else {
|
[encoder setBytes:&ne01 length:sizeof(ne01) atIndex:4];
|
||||||
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:2];
|
[encoder setBytes:&ne02 length:sizeof(ne02) atIndex:5];
|
||||||
}
|
[encoder setBytes:&scale length:sizeof(scale) atIndex:6];
|
||||||
[encoder setBuffer:id_dst offset:offs_dst atIndex:3];
|
[encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:7];
|
||||||
[encoder setBytes:&ne00 length:sizeof(ne00) atIndex:4];
|
[encoder setBytes:&m0 length:sizeof(m0) atIndex:8];
|
||||||
[encoder setBytes:&ne01 length:sizeof(ne01) atIndex:5];
|
[encoder setBytes:&m1 length:sizeof(m1) atIndex:9];
|
||||||
[encoder setBytes:&ne02 length:sizeof(ne02) atIndex:6];
|
[encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:10];
|
||||||
[encoder setBytes:&scale length:sizeof(scale) atIndex:7];
|
|
||||||
[encoder setBytes:&max_bias length:sizeof(max_bias) atIndex:8];
|
|
||||||
[encoder setBytes:&m0 length:sizeof(m0) atIndex:9];
|
|
||||||
[encoder setBytes:&m1 length:sizeof(m1) atIndex:10];
|
|
||||||
[encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:11];
|
|
||||||
[encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
|
[encoder setThreadgroupMemoryLength:32*sizeof(float) atIndex:0];
|
||||||
|
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
|
[encoder dispatchThreadgroups:MTLSizeMake(ne01*ne02*ne03, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
|
||||||
|
@ -2225,49 +2232,6 @@ static enum ggml_status ggml_metal_graph_compute(
|
||||||
|
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
|
[encoder dispatchThreadgroups:MTLSizeMake(nrows, 1, 1) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
|
||||||
} break;
|
} break;
|
||||||
case GGML_OP_ALIBI:
|
|
||||||
{
|
|
||||||
GGML_ASSERT((src0t == GGML_TYPE_F32));
|
|
||||||
|
|
||||||
const int nth = MIN(1024, ne00);
|
|
||||||
|
|
||||||
//const int n_past = ((int32_t *) dst->op_params)[0];
|
|
||||||
const int n_head = ((int32_t *) dst->op_params)[1];
|
|
||||||
|
|
||||||
float max_bias;
|
|
||||||
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
|
|
||||||
|
|
||||||
const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
|
|
||||||
const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
|
|
||||||
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
|
|
||||||
|
|
||||||
id<MTLComputePipelineState> pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_ALIBI_F32].pipeline;
|
|
||||||
|
|
||||||
[encoder setComputePipelineState:pipeline];
|
|
||||||
[encoder setBuffer:id_src0 offset:offs_src0 atIndex:0];
|
|
||||||
[encoder setBuffer:id_dst offset:offs_dst atIndex:1];
|
|
||||||
[encoder setBytes:&ne00 length:sizeof( int64_t) atIndex:2];
|
|
||||||
[encoder setBytes:&ne01 length:sizeof( int64_t) atIndex:3];
|
|
||||||
[encoder setBytes:&ne02 length:sizeof( int64_t) atIndex:4];
|
|
||||||
[encoder setBytes:&ne03 length:sizeof( int64_t) atIndex:5];
|
|
||||||
[encoder setBytes:&nb00 length:sizeof(uint64_t) atIndex:6];
|
|
||||||
[encoder setBytes:&nb01 length:sizeof(uint64_t) atIndex:7];
|
|
||||||
[encoder setBytes:&nb02 length:sizeof(uint64_t) atIndex:8];
|
|
||||||
[encoder setBytes:&nb03 length:sizeof(uint64_t) atIndex:9];
|
|
||||||
[encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:10];
|
|
||||||
[encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:11];
|
|
||||||
[encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:12];
|
|
||||||
[encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:13];
|
|
||||||
[encoder setBytes:&nb0 length:sizeof(uint64_t) atIndex:14];
|
|
||||||
[encoder setBytes:&nb1 length:sizeof(uint64_t) atIndex:15];
|
|
||||||
[encoder setBytes:&nb2 length:sizeof(uint64_t) atIndex:16];
|
|
||||||
[encoder setBytes:&nb3 length:sizeof(uint64_t) atIndex:17];
|
|
||||||
[encoder setBytes:&m0 length:sizeof( float) atIndex:18];
|
|
||||||
[encoder setBytes:&m1 length:sizeof( float) atIndex:19];
|
|
||||||
[encoder setBytes:&n_heads_log2_floor length:sizeof(int) atIndex:20];
|
|
||||||
|
|
||||||
[encoder dispatchThreadgroups:MTLSizeMake(ne01, ne02, ne03) threadsPerThreadgroup:MTLSizeMake(nth, 1, 1)];
|
|
||||||
} break;
|
|
||||||
case GGML_OP_ROPE:
|
case GGML_OP_ROPE:
|
||||||
{
|
{
|
||||||
GGML_ASSERT(ne10 == ne02);
|
GGML_ASSERT(ne10 == ne02);
|
||||||
|
@ -2565,7 +2529,7 @@ static enum ggml_status ggml_metal_graph_compute(
|
||||||
"the Flash-Attention Metal kernel requires the mask to be padded to 8 and at least n_queries big");
|
"the Flash-Attention Metal kernel requires the mask to be padded to 8 and at least n_queries big");
|
||||||
|
|
||||||
const int64_t ne30 = src3 ? src3->ne[0] : 0; GGML_UNUSED(ne30);
|
const int64_t ne30 = src3 ? src3->ne[0] : 0; GGML_UNUSED(ne30);
|
||||||
const int64_t ne31 = src3 ? src3->ne[1] : 0;
|
//const int64_t ne31 = src3 ? src3->ne[1] : 0;
|
||||||
const int64_t ne32 = src3 ? src3->ne[2] : 0; GGML_UNUSED(ne32);
|
const int64_t ne32 = src3 ? src3->ne[2] : 0; GGML_UNUSED(ne32);
|
||||||
const int64_t ne33 = src3 ? src3->ne[3] : 0; GGML_UNUSED(ne33);
|
const int64_t ne33 = src3 ? src3->ne[3] : 0; GGML_UNUSED(ne33);
|
||||||
|
|
||||||
|
@ -2577,7 +2541,16 @@ static enum ggml_status ggml_metal_graph_compute(
|
||||||
const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT; GGML_UNUSED(src2t);
|
const enum ggml_type src2t = src2 ? src2->type : GGML_TYPE_COUNT; GGML_UNUSED(src2t);
|
||||||
|
|
||||||
float scale;
|
float scale;
|
||||||
memcpy(&scale, dst->op_params, sizeof(float));
|
float max_bias;
|
||||||
|
|
||||||
|
memcpy(&scale, ((int32_t *) dst->op_params) + 0, sizeof(scale));
|
||||||
|
memcpy(&max_bias, ((int32_t *) dst->op_params) + 1, sizeof(max_bias));
|
||||||
|
|
||||||
|
const uint32_t n_head = src0->ne[2];
|
||||||
|
const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
|
||||||
|
|
||||||
|
const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
|
||||||
|
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
|
||||||
|
|
||||||
id<MTLComputePipelineState> pipeline = nil;
|
id<MTLComputePipelineState> pipeline = nil;
|
||||||
|
|
||||||
|
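As a reader's note on the values computed above (not part of the commit): m0, m1 and n_head_log2 encode the standard per-head ALiBi slope, the same quantity the removed GGML_OP_ALIBI path computed. In math form, for attention head h:

\[ n_{\text{head\_log2}} = 2^{\lfloor \log_2 n_{\text{head}} \rfloor}, \qquad m_0 = 2^{-\text{max\_bias}/n_{\text{head\_log2}}}, \qquad m_1 = 2^{-\text{max\_bias}/(2\,n_{\text{head\_log2}})} \]

\[ \text{slope}(h) = \begin{cases} m_0^{\,h+1} & h < n_{\text{head\_log2}} \\ m_1^{\,2(h-n_{\text{head\_log2}})+1} & \text{otherwise} \end{cases} \]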
@@ -2635,13 +2608,16 @@ static enum ggml_status ggml_metal_graph_compute(
 [encoder setBytes:&nb11 length:sizeof(uint64_t) atIndex:18];
 [encoder setBytes:&nb12 length:sizeof(uint64_t) atIndex:19];
 [encoder setBytes:&nb13 length:sizeof(uint64_t) atIndex:20];
-[encoder setBytes:&ne31 length:sizeof( int64_t) atIndex:21];
-[encoder setBytes:&nb31 length:sizeof(uint64_t) atIndex:22];
-[encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:23];
-[encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:24];
-[encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:25];
-[encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:26];
-[encoder setBytes:&scale length:sizeof( float) atIndex:27];
+[encoder setBytes:&nb31 length:sizeof(uint64_t) atIndex:21];
+[encoder setBytes:&ne0 length:sizeof( int64_t) atIndex:22];
+[encoder setBytes:&ne1 length:sizeof( int64_t) atIndex:23];
+[encoder setBytes:&ne2 length:sizeof( int64_t) atIndex:24];
+[encoder setBytes:&ne3 length:sizeof( int64_t) atIndex:25];
+[encoder setBytes:&scale length:sizeof( float) atIndex:26];
+[encoder setBytes:&max_bias length:sizeof( float) atIndex:27];
+[encoder setBytes:&m0 length:sizeof(m0) atIndex:28];
+[encoder setBytes:&m1 length:sizeof(m1) atIndex:29];
+[encoder setBytes:&n_head_log2 length:sizeof(n_head_log2) atIndex:30];

 if (!use_vec_kernel) {
 // half8x8 kernel
ggml-metal.metal (127 changed lines)

@@ -229,6 +229,13 @@ kernel void kernel_relu(
 dst[tpig] = max(0.0f, src0[tpig]);
 }

+kernel void kernel_sigmoid(
+device const float * src0,
+device float * dst,
+uint tpig[[thread_position_in_grid]]) {
+dst[tpig] = 1.0f / (1.0f + exp(-src0[tpig]));
+}
+
 kernel void kernel_tanh(
 device const float * src0,
 device float * dst,
@@ -356,7 +363,6 @@ template<typename T>
 kernel void kernel_soft_max(
 device const char * src0,
 device const char * src1,
-device const char * src2,
 device char * dst,
 constant int64_t & ne00,
 constant int64_t & ne01,
@@ -378,10 +384,9 @@ kernel void kernel_soft_max(

 device const float * psrc0 = (device const float *) src0 + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);
 device const T * pmask = src1 != src0 ? (device const T *) src1 + i01*ne00 : nullptr;
-device const T * ppos = src2 != src0 ? (device const T *) src2 : nullptr;
 device float * pdst = (device float *) dst + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00);

-float slope = 0.0f;
+float slope = 1.0f;

 // ALiBi
 if (max_bias > 0.0f) {
@@ -397,7 +402,7 @@ kernel void kernel_soft_max(
 float lmax = -INFINITY;

 for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
-lmax = MAX(lmax, psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f));
+lmax = MAX(lmax, psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f));
 }

 // find the max value in the block
@@ -422,7 +427,7 @@ kernel void kernel_soft_max(
 // parallel sum
 float lsum = 0.0f;
 for (int i00 = tpitg; i00 < ne00; i00 += ntg) {
-const float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f)) - max_val);
+const float exp_psrc0 = exp((psrc0[i00]*scale + (pmask ? slope*pmask[i00] : 0.0f)) - max_val);
 lsum += exp_psrc0;
 pdst[i00] = exp_psrc0;
 }
@@ -461,7 +466,6 @@ template<typename T>
 kernel void kernel_soft_max_4(
 device const char * src0,
 device const char * src1,
-device const char * src2,
 device char * dst,
 constant int64_t & ne00,
 constant int64_t & ne01,
@@ -483,10 +487,9 @@ kernel void kernel_soft_max_4(

 device const float4 * psrc4 = (device const float4 *) src0 + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00)/4;
 device const T * pmask = src1 != src0 ? (device const T *) src1 + i01*ne00/4 : nullptr;
-device const T * ppos = src2 != src0 ? (device const T *) src2 : nullptr;
 device float4 * pdst4 = (device float4 *) dst + (i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00)/4;

-float slope = 0.0f;
+float slope = 1.0f;

 if (max_bias > 0.0f) {
 const int64_t h = i02;
@@ -501,7 +504,7 @@ kernel void kernel_soft_max_4(
 float4 lmax4 = -INFINITY;

 for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
-lmax4 = fmax(lmax4, psrc4[i00]*scale + (float4)((pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f)));
+lmax4 = fmax(lmax4, psrc4[i00]*scale + (float4)((pmask ? slope*pmask[i00] : 0.0f)));
 }

 const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3]));
@@ -527,7 +530,7 @@ kernel void kernel_soft_max_4(
 // parallel sum
 float4 lsum4 = 0.0f;
 for (int i00 = tpitg; i00 < ne00/4; i00 += ntg) {
-const float4 exp_psrc4 = exp((psrc4[i00]*scale + (float4)((pmask ? pmask[i00] : 0.0f) + (ppos ? slope*ppos[i00] : 0.0f))) - max_val);
+const float4 exp_psrc4 = exp((psrc4[i00]*scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))) - max_val);
 lsum4 += exp_psrc4;
 pdst4[i00] = exp_psrc4;
 }
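With src2/ppos gone, these kernels fold ALiBi into soft_max by scaling the mask with the per-head slope. A minimal CPU sketch of the per-row computation they implement (illustration only, not code from the commit; soft_max_row_ref is a hypothetical helper, slope follows the formula given earlier, and mask may be NULL):

#include <math.h>

// Reference soft_max over one row of length n:
// y = softmax(x*scale + slope*mask), with the usual max-subtraction for stability.
static void soft_max_row_ref(const float *x, const float *mask, float *y,
                             int n, float scale, float slope) {
    float max_val = -INFINITY;
    for (int i = 0; i < n; i++) {
        const float s = x[i]*scale + (mask ? slope*mask[i] : 0.0f);
        if (s > max_val) max_val = s;
    }
    float sum = 0.0f;
    for (int i = 0; i < n; i++) {
        const float e = expf((x[i]*scale + (mask ? slope*mask[i] : 0.0f)) - max_val);
        y[i] = e;
        sum += e;
    }
    for (int i = 0; i < n; i++) {
        y[i] /= sum; // normalize
    }
}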
@@ -1595,60 +1598,6 @@ kernel void kernel_mul_mv_f16_f32_l4(
 }
 }

-kernel void kernel_alibi_f32(
-device const float * src0,
-device float * dst,
-constant int64_t & ne00,
-constant int64_t & ne01,
-constant int64_t & ne02,
-constant int64_t & ne03,
-constant uint64_t & nb00,
-constant uint64_t & nb01,
-constant uint64_t & nb02,
-constant uint64_t & nb03,
-constant int64_t & ne0,
-constant int64_t & ne1,
-constant int64_t & ne2,
-constant int64_t & ne3,
-constant uint64_t & nb0,
-constant uint64_t & nb1,
-constant uint64_t & nb2,
-constant uint64_t & nb3,
-constant float & m0,
-constant float & m1,
-constant int & n_heads_log2_floor,
-uint3 tgpig[[threadgroup_position_in_grid]],
-uint3 tpitg[[thread_position_in_threadgroup]],
-uint3 ntg[[threads_per_threadgroup]]) {
-const int64_t i03 = tgpig[2];
-const int64_t i02 = tgpig[1];
-const int64_t i01 = tgpig[0];
-
-const int64_t n = i03*ne02*ne01*ne00 + i02*ne01*ne00 + i01*ne00;
-
-const int64_t i3 = n / (ne2*ne1*ne0);
-const int64_t i2 = (n - i3*ne2*ne1*ne0) / (ne1*ne0);
-const int64_t i1 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0) / ne0;
-//const int64_t i0 = (n - i3*ne2*ne1*ne0 - i2*ne1*ne0 - i1*ne0);
-
-const int64_t k = i3*ne3 + i2;
-
-float m_k;
-if (k < n_heads_log2_floor) {
-m_k = pow(m0, k + 1);
-} else {
-m_k = pow(m1, 2 * (k - n_heads_log2_floor) + 1);
-}
-
-device char * dst_row = (device char *) dst + i3*nb3 + i2*nb2 + i1*nb1;
-device const char * src_row = (device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01;
-for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
-const float src_v = *(device float *)(src_row + i00*nb00);
-device float * dst_v = (device float *)(dst_row + i00*nb0);
-*dst_v = i00 * m_k + src_v;
-}
-}
-
 static float rope_yarn_ramp(const float low, const float high, const int i0) {
 const float y = (i0 / 2 - low) / max(0.001f, high - low);
 return 1.0f - min(1.0f, max(0.0f, y));
@@ -2116,13 +2065,16 @@ typedef void (flash_attn_ext_f16_t)(
 constant uint64_t & nb11,
 constant uint64_t & nb12,
 constant uint64_t & nb13,
-constant int64_t & ne31,
 constant uint64_t & nb31,
 constant int64_t & ne0,
 constant int64_t & ne1,
 constant int64_t & ne2,
 constant int64_t & ne3,
 constant float & scale,
+constant float & max_bias,
+constant float & m0,
+constant float & m1,
+constant uint32_t & n_head_log2,
 threadgroup half * shared,
 uint3 tgpig[[threadgroup_position_in_grid]],
 uint3 tpitg[[thread_position_in_threadgroup]],
@@ -2154,13 +2106,16 @@ kernel void kernel_flash_attn_ext_f16(
 constant uint64_t & nb11,
 constant uint64_t & nb12,
 constant uint64_t & nb13,
-constant int64_t & ne31,
 constant uint64_t & nb31,
 constant int64_t & ne0,
 constant int64_t & ne1,
 constant int64_t & ne2,
 constant int64_t & ne3,
 constant float & scale,
+constant float & max_bias,
+constant float & m0,
+constant float & m1,
+constant uint32_t & n_head_log2,
 threadgroup half * shared [[threadgroup(0)]],
 uint3 tgpig[[threadgroup_position_in_grid]],
 uint3 tpitg[[thread_position_in_threadgroup]],
@@ -2257,6 +2212,19 @@ kernel void kernel_flash_attn_ext_f16(
 // prepare diagonal scale matrix
 simdgroup_float8x8 mscale(scale);

+// prepare diagonal slope matrix
+simdgroup_float8x8 mslope(1.0f);
+
+// ALiBi
+if (max_bias > 0.0f) {
+const uint32_t h = iq2;
+
+const float base = h < n_head_log2 ? m0 : m1;
+const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
+
+mslope = simdgroup_float8x8(pow(base, exph));
+}
+
 // loop over the KV cache
 // each simdgroup handles blocks of Q rows and C columns
 for (int ic0 = 0; ic0 < ne11; ic0 += C*nsg) {
@@ -2279,9 +2247,10 @@ kernel void kernel_flash_attn_ext_f16(
 simdgroup_multiply_accumulate(mqk, mq[i], mk, mqk);
 }

-// mqk = mqk*scale + mask
+// mqk = mqk*scale + mask*slope
 simdgroup_half8x8 mm;
 simdgroup_load(mm, mp + ic + 8*cc, nb31/sizeof(half), 0, false);
+simdgroup_multiply(mm, mslope, mm);
 simdgroup_multiply_accumulate(mqk, mqk, mscale, mm);

 simdgroup_store(mqk, ss + 8*cc, TF, 0, false);
@@ -2472,13 +2441,16 @@ kernel void kernel_flash_attn_ext_vec_f16(
 constant uint64_t & nb11,
 constant uint64_t & nb12,
 constant uint64_t & nb13,
-constant int64_t & ne31,
 constant uint64_t & nb31,
 constant int64_t & ne0,
 constant int64_t & ne1,
 constant int64_t & ne2,
 constant int64_t & ne3,
 constant float & scale,
+constant float & max_bias,
+constant float & m0,
+constant float & m1,
+constant uint32_t & n_head_log2,
 threadgroup half * shared [[threadgroup(0)]],
 uint3 tgpig[[threadgroup_position_in_grid]],
 uint3 tpitg[[thread_position_in_threadgroup]],
@@ -2497,6 +2469,18 @@ kernel void kernel_flash_attn_ext_vec_f16(

 const short T = D + 2*nsg*SH; // shared memory size per query in (half)

+float slope = 1.0f;
+
+// ALiBi
+if (max_bias > 0.0f) {
+const uint32_t h = iq2;
+
+const float base = h < n_head_log2 ? m0 : m1;
+const int exp = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
+
+slope = pow(base, exp);
+}
+
 //threadgroup half * sq = (threadgroup half *) (shared + 0*D); // holds the query data
 threadgroup half4 * sq4 = (threadgroup half4 *) (shared + 0*D); // same as above but in half4
 threadgroup float * ss = (threadgroup float *) (shared + 2*sgitg*SH + 1*D); // scratch buffer for attention and diagonal matrix
@@ -2603,10 +2587,10 @@ kernel void kernel_flash_attn_ext_vec_f16(
 mqk += simd_shuffle_down(mqk, 2);
 mqk += simd_shuffle_down(mqk, 1);

-// mqk = mqk*scale + mask
+// mqk = mqk*scale + mask*slope
 if (tiisg == 0) {
 float4 mm = (float4) mp4[ic/4 + cc];
-mqk = mqk*scale + mm;
+mqk = mqk*scale + mm*slope;

 ss4[cc] = mqk;
 }
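Both flash-attention kernels now scale the mask by the same per-head slope before adding it to the scaled Q·K products (simdgroup_multiply(mm, mslope, mm) in the matrix kernel, mm*slope in the vector kernel). As a reader's restatement, not from the diff, the pre-softmax score for query i, key j and head h becomes:

\[ s_{ij} = \text{scale}\,(q_i \cdot k_j) + \text{slope}(h)\, m_{ij} \]

with slope(h) as defined above and slope = 1 when max_bias is 0, so the non-ALiBi path is unchanged.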
@@ -2840,7 +2824,8 @@ kernel void kernel_cpy_f32_f16(
 for (int64_t i00 = tpitg.x; i00 < ne00; i00 += ntg.x) {
 device const float * src = (device float *)((device char *) src0 + i03*nb03 + i02*nb02 + i01*nb01 + i00*nb00);

-dst_data[i00] = src[0];
+// TODO: is there a better way to handle -INFINITY?
+dst_data[i00] = src[0] == -INFINITY ? -MAXHALF : src[0];
 }
 }

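Because the fused soft_max path can feed -INFINITY mask values through this copy, kernel_cpy_f32_f16 now clamps them to the most negative finite half value. A standalone C sketch of the same guard (illustrative, not from the commit; it assumes MAXHALF denotes the largest finite IEEE half value, 65504):

#include <math.h>

#define HALF_MAX 65504.0f // largest finite IEEE-754 half value (assumed equivalent of Metal's MAXHALF)

// Clamp -inf before a float -> half conversion so the value stays representable.
static inline float clamp_neg_inf_for_f16(float x) {
    return x == -INFINITY ? -HALF_MAX : x;
}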

@@ -14,6 +14,12 @@
 #include <stdlib.h> // for qsort
 #include <stdio.h> // for GGML_ASSERT

+#if defined(_MSC_VER)
+// disable "possible loss of data" to avoid warnings for hundreds of casts
+// we should just be careful :)
+#pragma warning(disable: 4244 4267)
+#endif
+
 #define UNUSED GGML_UNUSED

 // some compilers don't provide _mm256_set_m128i, e.g. gcc 7
ggml-sycl.cpp (138 changed lines)

@@ -3154,7 +3154,6 @@ typedef float (*vec_dot_q_mul_mat_sycl_t)(
 #define SYCL_SCALE_BLOCK_SIZE 256
 #define SYCL_CLAMP_BLOCK_SIZE 256
 #define SYCL_ROPE_BLOCK_SIZE 256
-#define SYCL_ALIBI_BLOCK_SIZE 32
 #define SYCL_DIAG_MASK_INF_BLOCK_SIZE 32
 #define SYCL_QUANTIZE_BLOCK_SIZE 256
 #define SYCL_DEQUANTIZE_BLOCK_SIZE 256
@@ -9316,32 +9315,6 @@ static void rope_glm_f32(
 dst[i + half_n_dims * 3] = x2*sin_block_theta + x3*cos_block_theta;
 }

-static void alibi_f32(const float * x, float * dst, const int ncols, const int k_rows,
-const int n_heads_log2_floor, const float m0, const float m1,
-const sycl::nd_item<3> &item_ct1) {
-const int col = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
-item_ct1.get_local_id(2);
-
-if (col >= ncols) {
-return;
-}
-
-const int row = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
-item_ct1.get_local_id(1);
-const int i = row*ncols + col;
-
-const int k = row/k_rows;
-
-float m_k;
-if (k < n_heads_log2_floor) {
-m_k = dpct::pow(m0, k + 1);
-} else {
-m_k = dpct::pow(m1, 2 * (k - n_heads_log2_floor) + 1);
-}
-
-dst[i] = col * m_k + x[i];
-}
-
 static void k_sum_rows_f32(const float * x, float * dst, const int ncols,
 const sycl::nd_item<3> &item_ct1) {
 const int row = item_ct1.get_group(1);
@@ -9443,7 +9416,7 @@ static void diag_mask_inf_f32(const float * x, float * dst, const int ncols, con


 template <bool vals_smem, int ncols_template, int block_size_template>
-static void soft_max_f32(const float * x, const float * mask, const float *pos, float * dst, const int ncols_par,
+static void soft_max_f32(const float * x, const float * mask, float * dst, const int ncols_par,
 const int nrows_y, const float scale, const float max_bias, const float m0,
 const float m1, uint32_t n_head_log2, const sycl::nd_item<3> &item_ct1, float *buf) {
 const int ncols = ncols_template == 0 ? ncols_par : ncols_template;
@@ -9457,7 +9430,7 @@ static void soft_max_f32(const float * x, const float * mask, const float *pos,
 const int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
 const int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;

-float slope = 0.0f;
+float slope = 1.0f;

 // ALiBi
 if (max_bias > 0.0f) {
@@ -9482,7 +9455,7 @@ static void soft_max_f32(const float * x, const float * mask, const float *pos,
 const int ix = rowx*ncols + col;
 const int iy = rowy*ncols + col;

-const float val = x[ix]*scale + (mask ? mask[iy] : 0.0f) + (pos ? slope*pos[col] : 0.0f);
+const float val = x[ix]*scale + (mask ? slope*mask[iy] : 0.0f);

 vals[col] = val;
 max_val = sycl::max(max_val, val);
@@ -12964,20 +12937,6 @@ static void rope_glm_f32_sycl(const float *x, float *dst, int ncols, int nrows,
 });
 }

-static void alibi_f32_sycl(const float *x, float *dst, const int ncols,
-const int nrows, const int k_rows,
-const int n_heads_log2_floor, const float m0,
-const float m1, dpct::queue_ptr stream) {
-const sycl::range<3> block_dims(1, 1, SYCL_ALIBI_BLOCK_SIZE);
-const int num_blocks_x = (ncols + SYCL_ALIBI_BLOCK_SIZE - 1) / (SYCL_ALIBI_BLOCK_SIZE);
-const sycl::range<3> block_nums(1, nrows, num_blocks_x);
-stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
-[=](sycl::nd_item<3> item_ct1) {
-alibi_f32(x, dst, ncols, k_rows,
-n_heads_log2_floor, m0, m1, item_ct1);
-});
-}
-
 static void sum_rows_f32_sycl(const float *x, float *dst, const int ncols,
 const int nrows, dpct::queue_ptr stream) {
 const sycl::range<3> block_dims(1, 1, WARP_SIZE);
@@ -13058,7 +13017,7 @@ static void diag_mask_inf_f32_sycl(const float *x, float *dst,
 }

 template <bool vals_smem, int ncols_template, int block_size_template>
-static void soft_max_f32_submitter(const float * x, const float * mask, const float *pos, float * dst, const int ncols_par,
+static void soft_max_f32_submitter(const float * x, const float * mask, float * dst, const int ncols_par,
 const int nrows_y, const float scale, const float max_bias, const float m0,
 const float m1, uint32_t n_head_log2, sycl::range<3> block_nums, sycl::range<3> block_dims,
 const size_t n_local_scratch, dpct::queue_ptr stream) {
@@ -13068,7 +13027,7 @@ static void soft_max_f32_submitter(const float * x, const float * mask, const fl
 cgh.parallel_for(
 sycl::nd_range<3>(block_nums * block_dims, block_dims),
 [=](sycl::nd_item<3> item_ct1) [[intel::reqd_sub_group_size(32)]] {
-soft_max_f32<vals_smem, ncols_template, block_size_template>(x, mask, pos, dst, ncols_par,
+soft_max_f32<vals_smem, ncols_template, block_size_template>(x, mask, dst, ncols_par,
 nrows_y, scale, max_bias, m0,
 m1, n_head_log2, item_ct1,
 local_buf_acc.get_pointer());
@@ -13076,7 +13035,7 @@ static void soft_max_f32_submitter(const float * x, const float * mask, const fl
 });
 }

-static void soft_max_f32_sycl(const float * x, const float * mask, const float * pos,
+static void soft_max_f32_sycl(const float * x, const float * mask,
 float * dst, const int ncols_x, const int nrows_x,
 const int nrows_y, const float scale, const float max_bias,
 dpct::queue_ptr stream) {
@@ -13098,60 +13057,60 @@ static void soft_max_f32_sycl(const float * x, const float * mask, const float *
 const size_t local_mem_size = stream->get_device().get_info<sycl::info::device::local_mem_size>();
 if (n_local_scratch*sizeof(float) < local_mem_size) {
 if (ncols_x > max_block_size) {
-soft_max_f32_submitter<true, 0, 0>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 return;
 }
 switch (ncols_x) {
 case 32:
-soft_max_f32_submitter<true, 32, 32>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 32, 32>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 break;
 case 64:
-soft_max_f32_submitter<true, 64, 64>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 64, 64>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 break;
 case 128:
-soft_max_f32_submitter<true, 128, 128>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 128, 128>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 break;
 case 256:
-soft_max_f32_submitter<true, 256, 256>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 256, 256>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 break;
 case 512:
-soft_max_f32_submitter<true, 512, 512>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 512, 512>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 break;
 case 1024:
-soft_max_f32_submitter<true, 1024, 1024>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 1024, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 break;
 case 2048:
-soft_max_f32_submitter<true, 2048, 1024>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 2048, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 break;
 case 4096:
-soft_max_f32_submitter<true, 4096, 1024>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 4096, 1024>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 break;
 default:
-soft_max_f32_submitter<true, 0, 0>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<true, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, n_local_scratch, stream);
 break;
 }
 } else {
-soft_max_f32_submitter<false, 0, 0>(x, mask, pos, dst, ncols_x, nrows_y, scale,
+soft_max_f32_submitter<false, 0, 0>(x, mask, dst, ncols_x, nrows_y, scale,
 max_bias, m0, m1, n_head_log2, block_nums,
 block_dims, WARP_SIZE, stream);
 }
@@ -14562,36 +14521,6 @@ inline void ggml_sycl_op_rope(const ggml_tensor *src0, const ggml_tensor *src1,
 (void) src1_dd;
 }

-inline void ggml_sycl_op_alibi(const ggml_tensor *src0, const ggml_tensor *src1,
-ggml_tensor *dst, const float *src0_dd,
-const float *src1_dd, float *dst_dd,
-const dpct::queue_ptr &main_stream) {
-
-GGML_ASSERT(src0->type == GGML_TYPE_F32);
-GGML_ASSERT( dst->type == GGML_TYPE_F32);
-
-GGML_TENSOR_LOCALS_3(int64_t, ne0, src0, ne);
-const int64_t nrows = ggml_nrows(src0);
-
-//const int n_past = ((int32_t *) dst->op_params)[0];
-const int n_head = ((int32_t *) dst->op_params)[1];
-float max_bias;
-memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
-
-//GGML_ASSERT(ne01 + n_past == ne00);
-GGML_ASSERT(n_head == ne02);
-
-const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
-
-const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
-const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
-
-alibi_f32_sycl(src0_dd, dst_dd, ne00, nrows, ne01, n_heads_log2_floor, m0, m1, main_stream);
-
-(void) src1;
-(void) src1_dd;
-}
-
 static void ggml_sycl_op_pool2d(const ggml_tensor *src0,
 const ggml_tensor *src1, ggml_tensor *dst,
 const float *src0_dd, const float *src1_dd,
@@ -14746,12 +14675,9 @@ inline void ggml_sycl_op_soft_max(const ggml_tensor *src0,
 GGML_ASSERT(src0->type == GGML_TYPE_F32);
 GGML_ASSERT( dst->type == GGML_TYPE_F32);

-const ggml_tensor * src2 = dst->src[2];
-
-#pragma message("TODO: add ggml_sycl_op_soft_max() F16 src1 and src2 support")
+#pragma message("TODO: add ggml_sycl_op_soft_max() F16 src1 support")
 #pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5021")
 GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32); // src1 contains mask and it is optional
-GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F32); // src2 contains positions and it is optional

 const int64_t ne00 = src0->ne[0];
 const int64_t nrows_x = ggml_nrows(src0);
@@ -14763,25 +14689,7 @@ inline void ggml_sycl_op_soft_max(const ggml_tensor *src0,
 memcpy(&scale, dst->op_params + 0, sizeof(float));
 memcpy(&max_bias, dst->op_params + 1, sizeof(float));

-// positions tensor
-float * src2_dd = nullptr;
-sycl_pool_alloc<float> src2_f;
-
-const bool use_src2 = src2 != nullptr;
-
-if (use_src2) {
-const bool src2_on_device = src2->backend == GGML_BACKEND_TYPE_GPU;
-
-if (src2_on_device) {
-ggml_tensor_extra_gpu * src2_extra = (ggml_tensor_extra_gpu *) src2->extra;
-src2_dd = (float *) src2_extra->data_device[g_main_device];
-} else {
-src2_dd = src2_f.alloc(ggml_nelements(src2));
-SYCL_CHECK(ggml_sycl_cpy_tensor_2d(src2_dd, src2, 0, 0, 0, 1, main_stream));
-}
-}
-
-soft_max_f32_sycl(src0_dd, src1 ? src1_dd : nullptr, src2_dd, dst_dd, ne00,
+soft_max_f32_sycl(src0_dd, src1 ? src1_dd : nullptr, dst_dd, ne00,
 nrows_x, nrows_y, scale, max_bias, main_stream);
 }

@@ -16232,10 +16140,6 @@ static void ggml_sycl_rope(const ggml_tensor * src0, const ggml_tensor * src1, g
 ggml_sycl_op_flatten(src0, src1, dst, ggml_sycl_op_rope);
 }

-static void ggml_sycl_alibi(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
-ggml_sycl_op_flatten(src0, src1, dst, ggml_sycl_op_alibi);
-}
-
 static void ggml_sycl_pool2d(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
 ggml_sycl_op_flatten(src0, src1, dst, ggml_sycl_op_pool2d);
 }
@@ -16612,9 +16516,6 @@ bool ggml_sycl_compute_forward(struct ggml_compute_params * params, struct ggml_
 case GGML_OP_ROPE:
 func = ggml_sycl_rope;
 break;
-case GGML_OP_ALIBI:
-func = ggml_sycl_alibi;
-break;
 case GGML_OP_IM2COL:
 func = ggml_sycl_im2col;
 break;
@@ -17744,7 +17645,6 @@ GGML_CALL static bool ggml_backend_sycl_supports_op(ggml_backend_t backend, cons
 case GGML_OP_DIAG_MASK_INF:
 case GGML_OP_SOFT_MAX:
 case GGML_OP_ROPE:
-case GGML_OP_ALIBI:
 case GGML_OP_IM2COL:
 case GGML_OP_POOL_2D:
 case GGML_OP_SUM_ROWS:

@@ -3830,9 +3830,8 @@ static vk_pipeline ggml_vk_op_get_pipeline(ggml_backend_vk_context * ctx, const
 return nullptr;
 case GGML_OP_SOFT_MAX:
 GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F32 || src1->type == GGML_TYPE_F16);
-GGML_ASSERT(!src2 || src2->type == GGML_TYPE_F32 || src2->type == GGML_TYPE_F16);

-if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && (src2 == nullptr || src2->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
+if (src0->type == GGML_TYPE_F32 && (src1 == nullptr || src1->type == GGML_TYPE_F32) && dst->type == GGML_TYPE_F32) {
 return ctx->device->pipeline_soft_max_f32;
 }
 if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16 && src2->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F32) {
@@ -4286,6 +4285,9 @@ static void ggml_vk_soft_max(ggml_backend_vk_context * ctx, vk_context * subctx,
 const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
 const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);

+#pragma message("TODO: src2 is no longer used in soft_max - should be removed and ALiBi calculation should be updated")
+#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/7192")
+
 ggml_vk_op_f32<vk_op_soft_max_push_constants>(ctx, subctx, src0, src1, src2, dst, GGML_OP_SOFT_MAX, {
 ncols,
 src1 != nullptr ? nrows_y : (uint32_t)0,
383
ggml.c
383
ggml.c
|
@ -4,7 +4,6 @@
|
||||||
#include "ggml-impl.h"
|
#include "ggml-impl.h"
|
||||||
#include "ggml-quants.h"
|
#include "ggml-quants.h"
|
||||||
#include "ggml.h"
|
#include "ggml.h"
|
||||||
#include "sgemm.h"
|
|
||||||
|
|
||||||
#if defined(_MSC_VER) || defined(__MINGW32__)
|
#if defined(_MSC_VER) || defined(__MINGW32__)
|
||||||
#include <malloc.h> // using malloc.h with MSC/MINGW
|
#include <malloc.h> // using malloc.h with MSC/MINGW
|
||||||
|
@ -37,6 +36,10 @@
|
||||||
#undef GGML_USE_LLAMAFILE
|
#undef GGML_USE_LLAMAFILE
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#ifdef GGML_USE_LLAMAFILE
|
||||||
|
#include "sgemm.h"
|
||||||
|
#endif
|
||||||
|
|
||||||
#if defined(_MSC_VER)
|
#if defined(_MSC_VER)
|
||||||
// disable "possible loss of data" to avoid hundreds of casts
|
// disable "possible loss of data" to avoid hundreds of casts
|
||||||
// we should just be careful :)
|
// we should just be careful :)
|
||||||
|
@ -1949,6 +1952,7 @@ inline static void ggml_vec_tanh_f32 (const int n, float * y, const float * x) {
|
||||||
inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
|
inline static void ggml_vec_elu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : expf(x[i])-1; }
|
||||||
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
|
inline static void ggml_vec_relu_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = (x[i] > 0.f) ? x[i] : 0.f; }
|
||||||
inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }
|
inline static void ggml_vec_leaky_relu_f32 (const int n, float * y, const float * x, const float ns) { for (int i = 0; i < n; ++i) y[i] = ((x[i] > 0.f) ? x[i] : 0.f) + ns * ((x[i] < 0.0f) ? x[i] : 0.f); }
|
||||||
|
inline static void ggml_vec_sigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = 1.f / (1.f + expf(-x[i])); }
|
||||||
// TODO: optimize performance
|
// TODO: optimize performance
|
||||||
inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
|
inline static void ggml_vec_hardswish_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = x[i] * fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
|
||||||
inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
|
inline static void ggml_vec_hardsigmoid_f32 (const int n, float * y, const float * x) { for (int i = 0; i < n; ++i) y[i] = fminf(1.0f, fmaxf(0.0f, (x[i] + 3.0f) / 6.0f)); }
|
||||||
|
@ -2185,7 +2189,6 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
|
||||||
"SOFT_MAX_BACK",
|
"SOFT_MAX_BACK",
|
||||||
"ROPE",
|
"ROPE",
|
||||||
"ROPE_BACK",
|
"ROPE_BACK",
|
||||||
"ALIBI",
|
|
||||||
"CLAMP",
|
"CLAMP",
|
||||||
"CONV_TRANSPOSE_1D",
|
"CONV_TRANSPOSE_1D",
|
||||||
"IM2COL",
|
"IM2COL",
|
||||||
|
@ -2227,7 +2230,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
|
||||||
"CROSS_ENTROPY_LOSS_BACK",
|
"CROSS_ENTROPY_LOSS_BACK",
|
||||||
};
|
};
|
||||||
|
|
||||||
static_assert(GGML_OP_COUNT == 77, "GGML_OP_COUNT != 77");
|
static_assert(GGML_OP_COUNT == 76, "GGML_OP_COUNT != 76");
|
||||||
|
|
||||||
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
|
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
|
||||||
"none",
|
"none",
|
||||||
|
@ -2276,7 +2279,6 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
|
||||||
"soft_max_back(x)",
|
"soft_max_back(x)",
|
||||||
"rope(x)",
|
"rope(x)",
|
||||||
"rope_back(x)",
|
"rope_back(x)",
|
||||||
"alibi(x)",
|
|
||||||
"clamp(x)",
|
"clamp(x)",
|
||||||
"conv_transpose_1d(x)",
|
"conv_transpose_1d(x)",
|
||||||
"im2col(x)",
|
"im2col(x)",
|
||||||
|
@@ -2318,7 +2320,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
     "cross_entropy_loss_back(x,y)",
 };
 
-static_assert(GGML_OP_COUNT == 77, "GGML_OP_COUNT != 77");
+static_assert(GGML_OP_COUNT == 76, "GGML_OP_COUNT != 76");
 
 static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
 
@@ -2331,6 +2333,7 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
     "TANH",
     "ELU",
     "RELU",
+    "SIGMOID",
     "GELU",
     "GELU_QUICK",
     "SILU",
@@ -2338,7 +2341,7 @@ static const char * GGML_UNARY_OP_NAME[GGML_UNARY_OP_COUNT] = {
     "HARDSIGMOID",
 };
 
-static_assert(GGML_UNARY_OP_COUNT == 12, "GGML_UNARY_OP_COUNT != 12");
+static_assert(GGML_UNARY_OP_COUNT == 13, "GGML_UNARY_OP_COUNT != 13");
 
 
 static_assert(sizeof(struct ggml_object)%GGML_MEM_ALIGN == 0, "ggml_object size must be a multiple of GGML_MEM_ALIGN");
@@ -4563,6 +4566,20 @@ struct ggml_tensor * ggml_leaky_relu(
     return result;
 }
 
+// ggml_sigmoid
+
+struct ggml_tensor * ggml_sigmoid(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a) {
+    return ggml_unary(ctx, a, GGML_UNARY_OP_SIGMOID);
+}
+
+struct ggml_tensor * ggml_sigmoid_inplace(
+        struct ggml_context * ctx,
+        struct ggml_tensor  * a) {
+    return ggml_unary_inplace(ctx, a, GGML_UNARY_OP_SIGMOID);
+}
+
 // ggml_gelu
 
 struct ggml_tensor * ggml_gelu(
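A rough usage sketch for the new unary op, assuming the usual ggml context/graph helpers; this snippet is illustrative and not part of the commit, and note that the backward pass for sigmoid is still TODO:

    struct ggml_init_params ip = { /*.mem_size   =*/ 16*1024*1024,
                                   /*.mem_buffer =*/ NULL,
                                   /*.no_alloc   =*/ false };
    struct ggml_context * ctx = ggml_init(ip);

    struct ggml_tensor * x = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 8);
    // ... fill x->data with inputs ...
    struct ggml_tensor * y = ggml_sigmoid(ctx, x);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, y);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 1);

    ggml_free(ctx);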
@@ -5646,7 +5663,6 @@ static struct ggml_tensor * ggml_soft_max_impl(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
         struct ggml_tensor  * mask,
-        struct ggml_tensor  * pos,
         float                 scale,
         float                 max_bias,
         bool                  inplace) {
@@ -5660,18 +5676,8 @@ static struct ggml_tensor * ggml_soft_max_impl(
         GGML_ASSERT(mask->ne[1] >= a->ne[1]);
     }
 
-    if (pos) {
-        GGML_ASSERT(ggml_is_vector(pos));
-        GGML_ASSERT(pos->type == GGML_TYPE_F16 || pos->type == GGML_TYPE_F32);
-        GGML_ASSERT(pos->ne[0] == a->ne[0]);
-    }
-
-    if (pos && mask) {
-        GGML_ASSERT(pos->type == mask->type);
-    }
-
     if (max_bias > 0.0f) {
-        GGML_ASSERT(pos);
+        GGML_ASSERT(mask);
     }
 
     bool is_node = false;
@@ -5689,7 +5695,6 @@ static struct ggml_tensor * ggml_soft_max_impl(
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src[0] = a;
     result->src[1] = mask;
-    result->src[2] = pos;
 
     return result;
 }
@@ -5697,23 +5702,22 @@ static struct ggml_tensor * ggml_soft_max_impl(
 struct ggml_tensor * ggml_soft_max(
         struct ggml_context * ctx,
         struct ggml_tensor  * a) {
-    return ggml_soft_max_impl(ctx, a, NULL, NULL, 1.0f, 0.0f, false);
+    return ggml_soft_max_impl(ctx, a, NULL, 1.0f, 0.0f, false);
 }
 
 struct ggml_tensor * ggml_soft_max_inplace(
         struct ggml_context * ctx,
         struct ggml_tensor  * a) {
-    return ggml_soft_max_impl(ctx, a, NULL, NULL, 1.0f, 0.0f, true);
+    return ggml_soft_max_impl(ctx, a, NULL, 1.0f, 0.0f, true);
 }
 
 struct ggml_tensor * ggml_soft_max_ext(
         struct ggml_context * ctx,
         struct ggml_tensor  * a,
         struct ggml_tensor  * mask,
-        struct ggml_tensor  * pos,
         float                 scale,
         float                 max_bias) {
-    return ggml_soft_max_impl(ctx, a, mask, pos, scale, max_bias, false);
+    return ggml_soft_max_impl(ctx, a, mask, scale, max_bias, false);
 }
 
 // ggml_soft_max_back
|
||||||
return result;
|
return result;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ggml_alibi
|
|
||||||
|
|
||||||
struct ggml_tensor * ggml_alibi(
|
|
||||||
struct ggml_context * ctx,
|
|
||||||
struct ggml_tensor * a,
|
|
||||||
int n_past,
|
|
||||||
int n_head,
|
|
||||||
float bias_max) {
|
|
||||||
GGML_ASSERT(n_past >= 0);
|
|
||||||
bool is_node = false;
|
|
||||||
|
|
||||||
if (a->grad) {
|
|
||||||
GGML_ASSERT(false); // TODO: implement backward
|
|
||||||
is_node = true;
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO: when implement backward, fix this:
|
|
||||||
//struct ggml_tensor * result = inplace ? ggml_view_tensor(ctx, a) : ggml_dup_tensor(ctx, a);
|
|
||||||
struct ggml_tensor * result = ggml_view_tensor(ctx, a);
|
|
||||||
|
|
||||||
int32_t op_params[3] = { n_past, n_head };
|
|
||||||
memcpy(op_params + 2, &bias_max, sizeof(float));
|
|
||||||
ggml_set_op_params(result, op_params, sizeof(op_params));
|
|
||||||
|
|
||||||
result->op = GGML_OP_ALIBI;
|
|
||||||
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
|
|
||||||
result->src[0] = a;
|
|
||||||
|
|
||||||
return result;
|
|
||||||
}
|
|
||||||
|
|
||||||
// ggml_clamp
|
// ggml_clamp
|
||||||
|
|
||||||
struct ggml_tensor * ggml_clamp(
|
struct ggml_tensor * ggml_clamp(
|
||||||
|
@ -6486,9 +6459,11 @@ struct ggml_tensor * ggml_flash_attn_ext(
|
||||||
struct ggml_tensor * k,
|
struct ggml_tensor * k,
|
||||||
struct ggml_tensor * v,
|
struct ggml_tensor * v,
|
||||||
struct ggml_tensor * mask,
|
struct ggml_tensor * mask,
|
||||||
float scale) {
|
float scale,
|
||||||
|
float max_bias) {
|
||||||
GGML_ASSERT(ggml_can_mul_mat(k, q));
|
GGML_ASSERT(ggml_can_mul_mat(k, q));
|
||||||
// TODO: check if vT can be multiplied by (k*qT)
|
// TODO: check if vT can be multiplied by (k*qT)
|
||||||
|
|
||||||
if (mask) {
|
if (mask) {
|
||||||
GGML_ASSERT(ggml_is_contiguous(mask));
|
GGML_ASSERT(ggml_is_contiguous(mask));
|
||||||
GGML_ASSERT(mask->ne[2] == 1);
|
GGML_ASSERT(mask->ne[2] == 1);
|
||||||
|
@ -6498,6 +6473,10 @@ struct ggml_tensor * ggml_flash_attn_ext(
|
||||||
//GGML_ASSERT(ggml_can_repeat_rows(mask, qk));
|
//GGML_ASSERT(ggml_can_repeat_rows(mask, qk));
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (max_bias > 0.0f) {
|
||||||
|
GGML_ASSERT(mask);
|
||||||
|
}
|
||||||
|
|
||||||
bool is_node = false;
|
bool is_node = false;
|
||||||
|
|
||||||
if (q->grad || k->grad || v->grad) {
|
if (q->grad || k->grad || v->grad) {
|
||||||
|
@ -6508,7 +6487,7 @@ struct ggml_tensor * ggml_flash_attn_ext(
|
||||||
int64_t ne[4] = { q->ne[0], q->ne[2], q->ne[1], q->ne[3] };
|
int64_t ne[4] = { q->ne[0], q->ne[2], q->ne[1], q->ne[3] };
|
||||||
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
|
struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
|
||||||
|
|
||||||
float params[] = { scale };
|
float params[] = { scale, max_bias };
|
||||||
ggml_set_op_params(result, params, sizeof(params));
|
ggml_set_op_params(result, params, sizeof(params));
|
||||||
|
|
||||||
result->op = GGML_OP_FLASH_ATTN_EXT;
|
result->op = GGML_OP_FLASH_ATTN_EXT;
|
||||||
|
@ -6528,7 +6507,7 @@ void ggml_flash_attn_ext_set_prec(
|
||||||
|
|
||||||
const int32_t prec_i32 = (int32_t) prec;
|
const int32_t prec_i32 = (int32_t) prec;
|
||||||
|
|
||||||
ggml_set_op_params_i32(a, 1, prec_i32); // scale is on first pos
|
ggml_set_op_params_i32(a, 2, prec_i32); // scale is on first pos, max_bias on second
|
||||||
}
|
}
|
||||||
|
|
||||||
// ggml_flash_ff
|
// ggml_flash_ff
|
||||||
|
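Call sites of ggml_flash_attn_ext gain one extra argument. A hedged sketch of an updated caller (variable names are placeholders):

    // before this commit: ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale);
    struct ggml_tensor * cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale, max_bias);
    ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32); // precision is now stored in op_params[2]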
@@ -10892,6 +10871,52 @@ static void ggml_compute_forward_relu(
     }
 }
 
+// ggml_compute_forward_sigmoid
+
+static void ggml_compute_forward_sigmoid_f32(
+        const struct ggml_compute_params * params,
+        struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+
+    assert(params->ith == 0);
+    assert(ggml_are_same_shape(src0, dst));
+
+    if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
+        return;
+    }
+
+    const int n  = ggml_nrows(src0);
+    const int nc = src0->ne[0];
+
+    assert(dst->nb[0]  == sizeof(float));
+    assert(src0->nb[0] == sizeof(float));
+
+    for (int i = 0; i < n; i++) {
+        ggml_vec_sigmoid_f32(nc,
+                (float *) ((char *) dst->data  + i*( dst->nb[1])),
+                (float *) ((char *) src0->data + i*(src0->nb[1])));
+    }
+}
+
+static void ggml_compute_forward_sigmoid(
+        const struct ggml_compute_params * params,
+        struct ggml_tensor * dst) {
+
+    const struct ggml_tensor * src0 = dst->src[0];
+
+    switch (src0->type) {
+        case GGML_TYPE_F32:
+            {
+                ggml_compute_forward_sigmoid_f32(params, dst);
+            } break;
+        default:
+            {
+                GGML_ASSERT(false);
+            } break;
+    }
+}
+
 // ggml_compute_forward_gelu
 
 static void ggml_compute_forward_gelu_f32(
@@ -13333,7 +13358,6 @@ static void ggml_compute_forward_soft_max_f32(
 
     const struct ggml_tensor * src0 = dst->src[0];
     const struct ggml_tensor * src1 = dst->src[1];
-    const struct ggml_tensor * src2 = dst->src[2];
 
     assert(ggml_is_contiguous(dst));
     assert(ggml_are_same_shape(src0, dst));
@@ -13359,8 +13383,8 @@ static void ggml_compute_forward_soft_max_f32(
 
     // TODO: is this supposed to be ceil instead of floor?
     //       https://huggingface.co/mosaicml/mpt-7b/blob/main/attention.py#L370
-    const uint32_t n_head_kv   = ne02;
-    const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head_kv));
+    const uint32_t n_head      = ne02;
+    const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head));
 
     const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
     const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
@@ -13377,13 +13401,13 @@ static void ggml_compute_forward_soft_max_f32(
 
         float * wp = (float *) params->wdata + (nc + CACHE_LINE_SIZE_F32) * ith;
 
-        // when max_bias <= 0.0f, src2 is not used and we default it to src0 to avoid branching
-        ggml_fp16_t * pos_f16 = src2 ? (ggml_fp16_t *) src2->data : src0->data;
-        float       * pos_f32 = src2 ? (float       *) src2->data : src0->data;
-
-        const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16) || (src2 && src2->type == GGML_TYPE_F16);
+        const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);
 
         for (int i1 = ir0; i1 < ir1; i1++) {
+            // ALiBi
+            const uint32_t h = (i1/ne01)%ne02; // head
+            const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f;
+
             float * sp = (float *)((char *) src0->data + i1*src0->nb[1]);
             float * dp = (float *)((char *)  dst->data +  i1*dst->nb[1]);
@@ -13396,27 +13420,11 @@ static void ggml_compute_forward_soft_max_f32(
             if (mp_f32) {
                 if (use_f16) {
                     for (int i = 0; i < nc; ++i) {
-                        wp[i] += GGML_FP16_TO_FP32(mp_f16[i]);
+                        wp[i] += slope*GGML_FP16_TO_FP32(mp_f16[i]);
                     }
                 } else {
                     for (int i = 0; i < nc; ++i) {
-                        wp[i] += mp_f32[i];
-                    }
-                }
-            }
-
-            // ALiBi bias
-            if (max_bias > 0.0f) {
-                const uint32_t h = (i1/ne01)%ne02; // head
-                const float slope = h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1);
-
-                if (use_f16) {
-                    for (int i = 0; i < nc; ++i) {
-                        wp[i] += slope*GGML_FP16_TO_FP32(pos_f16[i]);
-                    }
-                } else {
-                    for (int i = 0; i < nc; ++i) {
-                        wp[i] += slope*pos_f32[i];
+                        wp[i] += slope*mp_f32[i];
                     }
                 }
             }
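The net effect per softmax row, shown as an illustrative fragment (wp already holds the scaled scores; slope is 1.0f when max_bias == 0.0f, so non-ALiBi models are unaffected):

    // p[i] = softmax_i( scale*x[i] + slope(h)*mask[i] )  for a row belonging to head h
    for (int i = 0; i < nc; ++i) {
        wp[i] += slope*mp_f32[i]; // mask supplies masking and, for ALiBi models, the distance term
    }
    // followed by the usual max-subtraction, expf and normalization over wp[0..nc-1]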
@ -13578,178 +13586,6 @@ static void ggml_compute_forward_soft_max_back(
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ggml_compute_forward_alibi
|
|
||||||
|
|
||||||
static void ggml_compute_forward_alibi_f32(
|
|
||||||
const struct ggml_compute_params * params,
|
|
||||||
struct ggml_tensor * dst) {
|
|
||||||
|
|
||||||
const struct ggml_tensor * src0 = dst->src[0];
|
|
||||||
|
|
||||||
assert(params->ith == 0);
|
|
||||||
|
|
||||||
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
//const int n_past = ((int32_t *) dst->op_params)[0];
|
|
||||||
const int n_head = ((int32_t *) dst->op_params)[1];
|
|
||||||
float max_bias;
|
|
||||||
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
|
|
||||||
|
|
||||||
const int64_t ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
|
|
||||||
const int64_t ne1 = src0->ne[1]; // seq_len_without_past
|
|
||||||
const int64_t ne2 = src0->ne[2]; // n_head -> this is k
|
|
||||||
//const int64_t ne3 = src0->ne[3]; // 1 -> bsz
|
|
||||||
|
|
||||||
const int64_t n = ggml_nrows(src0);
|
|
||||||
const int64_t ne2_ne3 = n/ne1; // ne2*ne3
|
|
||||||
|
|
||||||
const size_t nb0 = src0->nb[0];
|
|
||||||
const size_t nb1 = src0->nb[1];
|
|
||||||
const size_t nb2 = src0->nb[2];
|
|
||||||
//const int nb3 = src0->nb[3];
|
|
||||||
|
|
||||||
GGML_ASSERT(nb0 == sizeof(float));
|
|
||||||
GGML_ASSERT(n_head == ne2);
|
|
||||||
|
|
||||||
// add alibi to src0 (KQ_scaled)
|
|
||||||
const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
|
|
||||||
|
|
||||||
const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
|
|
||||||
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
|
|
||||||
|
|
||||||
for (int64_t k = 0; k < ne2_ne3; k++) {
|
|
||||||
// TODO: k*nb2 or k*nb3
|
|
||||||
float m_k;
|
|
||||||
|
|
||||||
if (k < n_heads_log2_floor) {
|
|
||||||
m_k = powf(m0, k + 1);
|
|
||||||
} else {
|
|
||||||
m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int64_t i = 0; i < ne0; i++) {
|
|
||||||
for (int64_t j = 0; j < ne1; j++) {
|
|
||||||
float * const src = (float *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
|
|
||||||
float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
|
|
||||||
pdst[0] = i * m_k + src[0];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_compute_forward_alibi_f16(
|
|
||||||
const struct ggml_compute_params * params,
|
|
||||||
struct ggml_tensor * dst) {
|
|
||||||
|
|
||||||
const struct ggml_tensor * src0 = dst->src[0];
|
|
||||||
|
|
||||||
assert(params->ith == 0);
|
|
||||||
|
|
||||||
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
//const int n_past = ((int32_t *) dst->op_params)[0];
|
|
||||||
const int n_head = ((int32_t *) dst->op_params)[1];
|
|
||||||
float max_bias;
|
|
||||||
memcpy(&max_bias, (int32_t *) dst->op_params + 2, sizeof(float));
|
|
||||||
|
|
||||||
const int ne0 = src0->ne[0]; // all_seq_len = n_past + ne1
|
|
||||||
const int ne1 = src0->ne[1]; // seq_len_without_past
|
|
||||||
const int ne2 = src0->ne[2]; // n_head -> this is k
|
|
||||||
//const int ne3 = src0->ne[3]; // 1 -> bsz
|
|
||||||
|
|
||||||
const int n = ggml_nrows(src0);
|
|
||||||
const int ne2_ne3 = n/ne1; // ne2*ne3
|
|
||||||
|
|
||||||
const int nb0 = src0->nb[0];
|
|
||||||
const int nb1 = src0->nb[1];
|
|
||||||
const int nb2 = src0->nb[2];
|
|
||||||
//const int nb3 = src0->nb[3];
|
|
||||||
|
|
||||||
GGML_ASSERT(nb0 == sizeof(ggml_fp16_t));
|
|
||||||
//GGML_ASSERT(ne1 + n_past == ne0); (void) n_past;
|
|
||||||
GGML_ASSERT(n_head == ne2);
|
|
||||||
|
|
||||||
// add alibi to src0 (KQ_scaled)
|
|
||||||
const int n_heads_log2_floor = 1 << (int) floor(log2(n_head));
|
|
||||||
|
|
||||||
const float m0 = powf(2.0f, -(max_bias) / n_heads_log2_floor);
|
|
||||||
const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_heads_log2_floor);
|
|
||||||
|
|
||||||
for (int k = 0; k < ne2_ne3; k++) {
|
|
||||||
// TODO: k*nb2 or k*nb3
|
|
||||||
float m_k;
|
|
||||||
|
|
||||||
if (k < n_heads_log2_floor) {
|
|
||||||
m_k = powf(m0, k + 1);
|
|
||||||
} else {
|
|
||||||
m_k = powf(m1, 2 * (k - n_heads_log2_floor) + 1);
|
|
||||||
}
|
|
||||||
|
|
||||||
for (int i = 0; i < ne0; i++) {
|
|
||||||
for (int j = 0; j < ne1; j++) {
|
|
||||||
ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i*nb0 + j*nb1 + k*nb2);
|
|
||||||
float * pdst = (float *)((char *) dst->data + i*nb0 + j*nb1 + k*nb2);
|
|
||||||
|
|
||||||
// we return F32
|
|
||||||
pdst[0] = i * m_k + GGML_FP16_TO_FP32(src[0]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
static void ggml_compute_forward_alibi(
|
|
||||||
const struct ggml_compute_params * params,
|
|
||||||
struct ggml_tensor * dst) {
|
|
||||||
|
|
||||||
const struct ggml_tensor * src0 = dst->src[0];
|
|
||||||
|
|
||||||
switch (src0->type) {
|
|
||||||
case GGML_TYPE_F16:
|
|
||||||
{
|
|
||||||
ggml_compute_forward_alibi_f16(params, dst);
|
|
||||||
} break;
|
|
||||||
case GGML_TYPE_F32:
|
|
||||||
{
|
|
||||||
ggml_compute_forward_alibi_f32(params, dst);
|
|
||||||
} break;
|
|
||||||
case GGML_TYPE_BF16:
|
|
||||||
case GGML_TYPE_Q4_0:
|
|
||||||
case GGML_TYPE_Q4_1:
|
|
||||||
case GGML_TYPE_Q5_0:
|
|
||||||
case GGML_TYPE_Q5_1:
|
|
||||||
case GGML_TYPE_Q8_0:
|
|
||||||
case GGML_TYPE_Q8_1:
|
|
||||||
case GGML_TYPE_Q2_K:
|
|
||||||
case GGML_TYPE_Q3_K:
|
|
||||||
case GGML_TYPE_Q4_K:
|
|
||||||
case GGML_TYPE_Q5_K:
|
|
||||||
case GGML_TYPE_Q6_K:
|
|
||||||
case GGML_TYPE_IQ2_XXS:
|
|
||||||
case GGML_TYPE_IQ2_XS:
|
|
||||||
case GGML_TYPE_IQ3_XXS:
|
|
||||||
case GGML_TYPE_IQ1_S:
|
|
||||||
case GGML_TYPE_IQ1_M:
|
|
||||||
case GGML_TYPE_IQ4_NL:
|
|
||||||
case GGML_TYPE_IQ4_XS:
|
|
||||||
case GGML_TYPE_IQ3_S:
|
|
||||||
case GGML_TYPE_IQ2_S:
|
|
||||||
case GGML_TYPE_Q8_K:
|
|
||||||
case GGML_TYPE_I8:
|
|
||||||
case GGML_TYPE_I16:
|
|
||||||
case GGML_TYPE_I32:
|
|
||||||
case GGML_TYPE_I64:
|
|
||||||
case GGML_TYPE_F64:
|
|
||||||
case GGML_TYPE_COUNT:
|
|
||||||
{
|
|
||||||
GGML_ASSERT(false);
|
|
||||||
} break;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ggml_compute_forward_clamp
|
// ggml_compute_forward_clamp
|
||||||
|
|
||||||
static void ggml_compute_forward_clamp_f32(
|
static void ggml_compute_forward_clamp_f32(
|
||||||
|
@@ -15764,7 +15600,16 @@ static void ggml_compute_forward_flash_attn_ext_f16(
     const int ir1 = MIN(ir0 + dr, nr);
 
     float scale    = 1.0f;
+    float max_bias = 0.0f;
 
     memcpy(&scale,    (float *) dst->op_params + 0, sizeof(float));
+    memcpy(&max_bias, (float *) dst->op_params + 1, sizeof(float));
+
+    const uint32_t n_head      = neq2;
+    const uint32_t n_head_log2 = 1u << (uint32_t) floor(log2(n_head));
+
+    const float m0 = powf(2.0f, -(max_bias       ) / n_head_log2);
+    const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
 
     // loop over n_batch and n_head
     for (int ir = ir0; ir < ir1; ++ir) {
@@ -15773,6 +15618,9 @@ static void ggml_compute_forward_flash_attn_ext_f16(
         const int iq2 = (ir - iq3*neq2*neq1)/neq1;
         const int iq1 = (ir - iq3*neq2*neq1 - iq2*neq1);
 
+        const uint32_t h = iq2; // head
+        const float slope = (max_bias > 0.0f) ? h < n_head_log2 ? powf(m0, h + 1) : powf(m1, 2*(h - n_head_log2) + 1) : 1.0f;
+
         float S = 0.0f;
         float M = -INFINITY;
 
@@ -15796,7 +15644,7 @@ static void ggml_compute_forward_flash_attn_ext_f16(
         // loop over n_kv and n_head_kv
         // ref: https://arxiv.org/pdf/2112.05682.pdf
         for (int64_t ic = 0; ic < nek1; ++ic) {
-            const float mv = mp ? GGML_FP16_TO_FP32(mp[ic]) : 0.0f;
+            const float mv = mp ? slope*GGML_FP16_TO_FP32(mp[ic]) : 0.0f;
             if (mv == -INFINITY) {
                 continue;
             }
@@ -15867,7 +15715,7 @@ static void ggml_compute_forward_flash_attn_ext(
         const struct ggml_tensor * v,
         const struct ggml_tensor * mask,
         struct ggml_tensor * dst) {
-    switch (dst->op_params[1]) {
+    switch (dst->op_params[2]) {
         case GGML_PREC_DEFAULT:
         case GGML_PREC_F32:
             {
@@ -16834,6 +16682,10 @@ static void ggml_compute_forward_unary(
             {
                 ggml_compute_forward_relu(params, dst);
             } break;
+        case GGML_UNARY_OP_SIGMOID:
+            {
+                ggml_compute_forward_sigmoid(params, dst);
+            } break;
         case GGML_UNARY_OP_GELU:
             {
                 ggml_compute_forward_gelu(params, dst);
@@ -17630,10 +17482,6 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
             {
                 ggml_compute_forward_rope_back(params, tensor);
             } break;
-        case GGML_OP_ALIBI:
-            {
-                ggml_compute_forward_alibi(params, tensor);
-            } break;
         case GGML_OP_CLAMP:
             {
                 ggml_compute_forward_clamp(params, tensor);
@@ -18652,10 +18500,6 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                         zero_table);
                 }
             } break;
-        case GGML_OP_ALIBI:
-            {
-                GGML_ASSERT(false); // TODO: not implemented
-            } break;
         case GGML_OP_CLAMP:
             {
                 GGML_ASSERT(false); // TODO: not implemented
@@ -18826,6 +18670,10 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
                         zero_table);
                 }
             } break;
+            case GGML_UNARY_OP_SIGMOID:
+                {
+                    GGML_ASSERT(false); // TODO: not implemented
+                } break;
             case GGML_UNARY_OP_GELU:
                 {
                     GGML_ASSERT(false); // TODO: not implemented
@@ -19355,6 +19203,7 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads, int n_cur_
                 case GGML_UNARY_OP_TANH:
                 case GGML_UNARY_OP_ELU:
                 case GGML_UNARY_OP_RELU:
+                case GGML_UNARY_OP_SIGMOID:
                 case GGML_UNARY_OP_HARDSWISH: // to opt for multiple threads
                 case GGML_UNARY_OP_HARDSIGMOID: // to opt for multiple threads
                     {
@@ -19428,10 +19277,6 @@ static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads, int n_cur_
             {
                 n_tasks = n_threads;
             } break;
-        case GGML_OP_ALIBI:
-            {
-                n_tasks = 1; //TODO
-            } break;
        case GGML_OP_CLAMP:
             {
                 n_tasks = 1; //TODO
ggml.h (27 changes)
@@ -468,7 +468,6 @@ extern "C" {
         GGML_OP_SOFT_MAX_BACK,
         GGML_OP_ROPE,
         GGML_OP_ROPE_BACK,
-        GGML_OP_ALIBI,
         GGML_OP_CLAMP,
         GGML_OP_CONV_TRANSPOSE_1D,
         GGML_OP_IM2COL,
@@ -520,6 +519,7 @@ extern "C" {
         GGML_UNARY_OP_TANH,
         GGML_UNARY_OP_ELU,
         GGML_UNARY_OP_RELU,
+        GGML_UNARY_OP_SIGMOID,
         GGML_UNARY_OP_GELU,
         GGML_UNARY_OP_GELU_QUICK,
         GGML_UNARY_OP_SILU,
@@ -1074,6 +1074,14 @@ extern "C" {
             struct ggml_context * ctx,
             struct ggml_tensor  * a);
 
+    GGML_API struct ggml_tensor * ggml_sigmoid(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a);
+
+    GGML_API struct ggml_tensor * ggml_sigmoid_inplace(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a);
+
     GGML_API struct ggml_tensor * ggml_gelu(
             struct ggml_context * ctx,
             struct ggml_tensor  * a);
@@ -1428,15 +1436,13 @@ extern "C" {
             struct ggml_context * ctx,
             struct ggml_tensor  * a);
 
-    // fused soft_max(a*scale + mask + pos[i]*(ALiBi slope))
+    // fused soft_max(a*scale + mask*(ALiBi slope))
     // mask is optional
-    // pos is required when max_bias > 0.0f
     // max_bias = 0.0f for no ALiBi
     GGML_API struct ggml_tensor * ggml_soft_max_ext(
             struct ggml_context * ctx,
             struct ggml_tensor  * a,
             struct ggml_tensor  * mask,
-            struct ggml_tensor  * pos,
             float                 scale,
             float                 max_bias);
 
@@ -1538,16 +1544,6 @@ extern "C" {
             float                 xpos_base,
             bool                  xpos_down);
 
-    // alibi position embedding
-    // in-place, returns view(a)
-    GGML_DEPRECATED(GGML_API struct ggml_tensor * ggml_alibi(
-            struct ggml_context * ctx,
-            struct ggml_tensor  * a,
-            int                   n_past,
-            int                   n_head,
-            float                 bias_max),
-        "use ggml_soft_max_ext instead (will be removed in Mar 2024)");
-
     // clamp
     // in-place, returns view(a)
     GGML_API struct ggml_tensor * ggml_clamp(
@@ -1744,7 +1740,8 @@ extern "C" {
             struct ggml_tensor  * k,
             struct ggml_tensor  * v,
             struct ggml_tensor  * mask,
-            float                 scale);
+            float                 scale,
+            float                 max_bias);
 
     GGML_API void ggml_flash_attn_ext_set_prec(
             struct ggml_tensor * a,
@@ -1,4 +1,5 @@
 from .constants import *
+from .lazy import *
 from .gguf_reader import *
 from .gguf_writer import *
 from .tensor_mapping import *
|
||||||
GGUF_MAGIC = 0x46554747 # "GGUF"
|
GGUF_MAGIC = 0x46554747 # "GGUF"
|
||||||
GGUF_VERSION = 3
|
GGUF_VERSION = 3
|
||||||
GGUF_DEFAULT_ALIGNMENT = 32
|
GGUF_DEFAULT_ALIGNMENT = 32
|
||||||
|
GGML_QUANT_VERSION = 2 # GGML_QNT_VERSION from ggml.h
|
||||||
|
|
||||||
#
|
#
|
||||||
# metadata keys
|
# metadata keys
|
||||||
|
@ -118,6 +119,7 @@ class MODEL_ARCH(IntEnum):
|
||||||
REFACT = auto()
|
REFACT = auto()
|
||||||
BERT = auto()
|
BERT = auto()
|
||||||
NOMIC_BERT = auto()
|
NOMIC_BERT = auto()
|
||||||
|
JINA_BERT_V2 = auto()
|
||||||
BLOOM = auto()
|
BLOOM = auto()
|
||||||
STABLELM = auto()
|
STABLELM = auto()
|
||||||
QWEN = auto()
|
QWEN = auto()
|
||||||
|
@ -195,6 +197,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
|
||||||
MODEL_ARCH.REFACT: "refact",
|
MODEL_ARCH.REFACT: "refact",
|
||||||
MODEL_ARCH.BERT: "bert",
|
MODEL_ARCH.BERT: "bert",
|
||||||
MODEL_ARCH.NOMIC_BERT: "nomic-bert",
|
MODEL_ARCH.NOMIC_BERT: "nomic-bert",
|
||||||
|
MODEL_ARCH.JINA_BERT_V2: "jina-bert-v2",
|
||||||
MODEL_ARCH.BLOOM: "bloom",
|
MODEL_ARCH.BLOOM: "bloom",
|
||||||
MODEL_ARCH.STABLELM: "stablelm",
|
MODEL_ARCH.STABLELM: "stablelm",
|
||||||
MODEL_ARCH.QWEN: "qwen",
|
MODEL_ARCH.QWEN: "qwen",
|
||||||
|
@ -380,6 +383,22 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
|
||||||
MODEL_TENSOR.FFN_UP,
|
MODEL_TENSOR.FFN_UP,
|
||||||
MODEL_TENSOR.LAYER_OUT_NORM,
|
MODEL_TENSOR.LAYER_OUT_NORM,
|
||||||
],
|
],
|
||||||
|
MODEL_ARCH.JINA_BERT_V2: [
|
||||||
|
MODEL_TENSOR.TOKEN_EMBD,
|
||||||
|
MODEL_TENSOR.TOKEN_EMBD_NORM,
|
||||||
|
MODEL_TENSOR.TOKEN_TYPES,
|
||||||
|
MODEL_TENSOR.ATTN_OUT_NORM,
|
||||||
|
MODEL_TENSOR.ATTN_Q,
|
||||||
|
MODEL_TENSOR.ATTN_Q_NORM,
|
||||||
|
MODEL_TENSOR.ATTN_K,
|
||||||
|
MODEL_TENSOR.ATTN_K_NORM,
|
||||||
|
MODEL_TENSOR.ATTN_V,
|
||||||
|
MODEL_TENSOR.ATTN_OUT,
|
||||||
|
MODEL_TENSOR.FFN_UP,
|
||||||
|
MODEL_TENSOR.FFN_GATE,
|
||||||
|
MODEL_TENSOR.FFN_DOWN,
|
||||||
|
MODEL_TENSOR.LAYER_OUT_NORM,
|
||||||
|
],
|
||||||
MODEL_ARCH.MPT: [
|
MODEL_ARCH.MPT: [
|
||||||
MODEL_TENSOR.TOKEN_EMBD,
|
MODEL_TENSOR.TOKEN_EMBD,
|
||||||
MODEL_TENSOR.OUTPUT_NORM,
|
MODEL_TENSOR.OUTPUT_NORM,
|
||||||
|
@ -820,6 +839,49 @@ class GGMLQuantizationType(IntEnum):
|
||||||
BF16 = 30
|
BF16 = 30
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: add GGMLFileType from ggml_ftype in ggml.h
|
||||||
|
|
||||||
|
|
||||||
|
# from llama_ftype in llama.h
|
||||||
|
# ALL VALUES SHOULD BE THE SAME HERE AS THEY ARE OVER THERE.
|
||||||
|
class LlamaFileType(IntEnum):
|
||||||
|
ALL_F32 = 0
|
||||||
|
MOSTLY_F16 = 1 # except 1d tensors
|
||||||
|
MOSTLY_Q4_0 = 2 # except 1d tensors
|
||||||
|
MOSTLY_Q4_1 = 3 # except 1d tensors
|
||||||
|
MOSTLY_Q4_1_SOME_F16 = 4 # tok_embeddings.weight and output.weight are F16
|
||||||
|
# MOSTLY_Q4_2 = 5 # support has been removed
|
||||||
|
# MOSTLY_Q4_3 = 6 # support has been removed
|
||||||
|
MOSTLY_Q8_0 = 7 # except 1d tensors
|
||||||
|
MOSTLY_Q5_0 = 8 # except 1d tensors
|
||||||
|
MOSTLY_Q5_1 = 9 # except 1d tensors
|
||||||
|
MOSTLY_Q2_K = 10 # except 1d tensors
|
||||||
|
MOSTLY_Q3_K_S = 11 # except 1d tensors
|
||||||
|
MOSTLY_Q3_K_M = 12 # except 1d tensors
|
||||||
|
MOSTLY_Q3_K_L = 13 # except 1d tensors
|
||||||
|
MOSTLY_Q4_K_S = 14 # except 1d tensors
|
||||||
|
MOSTLY_Q4_K_M = 15 # except 1d tensors
|
||||||
|
MOSTLY_Q5_K_S = 16 # except 1d tensors
|
||||||
|
MOSTLY_Q5_K_M = 17 # except 1d tensors
|
||||||
|
MOSTLY_Q6_K = 18 # except 1d tensors
|
||||||
|
MOSTLY_IQ2_XXS = 19 # except 1d tensors
|
||||||
|
MOSTLY_IQ2_XS = 20 # except 1d tensors
|
||||||
|
MOSTLY_Q2_K_S = 21 # except 1d tensors
|
||||||
|
MOSTLY_IQ3_XS = 22 # except 1d tensors
|
||||||
|
MOSTLY_IQ3_XXS = 23 # except 1d tensors
|
||||||
|
MOSTLY_IQ1_S = 24 # except 1d tensors
|
||||||
|
MOSTLY_IQ4_NL = 25 # except 1d tensors
|
||||||
|
MOSTLY_IQ3_S = 26 # except 1d tensors
|
||||||
|
MOSTLY_IQ3_M = 27 # except 1d tensors
|
||||||
|
MOSTLY_IQ2_S = 28 # except 1d tensors
|
||||||
|
MOSTLY_IQ2_M = 29 # except 1d tensors
|
||||||
|
MOSTLY_IQ4_XS = 30 # except 1d tensors
|
||||||
|
MOSTLY_IQ1_M = 31 # except 1d tensors
|
||||||
|
MOSTLY_BF16 = 32 # except 1d tensors
|
||||||
|
|
||||||
|
GUESSED = 1024 # not specified in the model file
|
||||||
|
|
||||||
|
|
||||||
class GGUFEndian(IntEnum):
|
class GGUFEndian(IntEnum):
|
||||||
LITTLE = 0
|
LITTLE = 0
|
||||||
BIG = 1
|
BIG = 1
|
||||||
|
|
|
@ -7,7 +7,7 @@ import struct
|
||||||
import tempfile
|
import tempfile
|
||||||
from enum import Enum, auto
|
from enum import Enum, auto
|
||||||
from io import BufferedWriter
|
from io import BufferedWriter
|
||||||
from typing import IO, Any, Callable, Sequence, Mapping
|
from typing import IO, Any, Sequence, Mapping
|
||||||
from string import ascii_letters, digits
|
from string import ascii_letters, digits
|
||||||
|
|
||||||
import numpy as np
|
import numpy as np
|
||||||
|
@ -28,47 +28,6 @@ from .constants import (
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
class LazyTensor:
|
|
||||||
data: Callable[[], np.ndarray[Any, Any]]
|
|
||||||
# to avoid too deep recursion
|
|
||||||
functions: list[Callable[[np.ndarray[Any, Any]], np.ndarray[Any, Any]]]
|
|
||||||
dtype: np.dtype[Any]
|
|
||||||
shape: tuple[int, ...]
|
|
||||||
|
|
||||||
def __init__(self, data: Callable[[], np.ndarray[Any, Any]], *, dtype: type, shape: tuple[int, ...]):
|
|
||||||
self.data = data
|
|
||||||
self.functions = []
|
|
||||||
self.dtype = np.dtype(dtype)
|
|
||||||
self.shape = shape
|
|
||||||
|
|
||||||
def astype(self, dtype: type, **kwargs) -> LazyTensor:
|
|
||||||
self.functions.append(lambda n: n.astype(dtype, **kwargs))
|
|
||||||
self.dtype = np.dtype(dtype)
|
|
||||||
return self
|
|
||||||
|
|
||||||
@property
|
|
||||||
def nbytes(self) -> int:
|
|
||||||
size = 1
|
|
||||||
for n in self.shape:
|
|
||||||
size *= n
|
|
||||||
return size * self.dtype.itemsize
|
|
||||||
|
|
||||||
def tofile(self, *args, **kwargs) -> None:
|
|
||||||
data = self.data()
|
|
||||||
for f in self.functions:
|
|
||||||
data = f(data)
|
|
||||||
assert data.shape == self.shape
|
|
||||||
assert data.dtype == self.dtype
|
|
||||||
assert data.nbytes == self.nbytes
|
|
||||||
self.functions = []
|
|
||||||
self.data = lambda: data
|
|
||||||
data.tofile(*args, **kwargs)
|
|
||||||
|
|
||||||
def byteswap(self, *args, **kwargs) -> LazyTensor:
|
|
||||||
self.functions.append(lambda n: n.byteswap(*args, **kwargs))
|
|
||||||
return self
|
|
||||||
|
|
||||||
|
|
||||||
class WriterState(Enum):
|
class WriterState(Enum):
|
||||||
EMPTY = auto()
|
EMPTY = auto()
|
||||||
HEADER = auto()
|
HEADER = auto()
|
||||||
|
@ -79,7 +38,7 @@ class WriterState(Enum):
|
||||||
class GGUFWriter:
|
class GGUFWriter:
|
||||||
fout: BufferedWriter
|
fout: BufferedWriter
|
||||||
temp_file: tempfile.SpooledTemporaryFile[bytes] | None
|
temp_file: tempfile.SpooledTemporaryFile[bytes] | None
|
||||||
tensors: list[np.ndarray[Any, Any] | LazyTensor]
|
tensors: list[np.ndarray[Any, Any]]
|
||||||
_simple_value_packing = {
|
_simple_value_packing = {
|
||||||
GGUFValueType.UINT8: "B",
|
GGUFValueType.UINT8: "B",
|
||||||
GGUFValueType.INT8: "b",
|
GGUFValueType.INT8: "b",
|
||||||
|
@ -278,7 +237,7 @@ class GGUFWriter:
|
||||||
self.ti_data_count += 1
|
self.ti_data_count += 1
|
||||||
|
|
||||||
def add_tensor(
|
def add_tensor(
|
||||||
self, name: str, tensor: np.ndarray[Any, Any] | LazyTensor, raw_shape: Sequence[int] | None = None,
|
self, name: str, tensor: np.ndarray[Any, Any], raw_shape: Sequence[int] | None = None,
|
||||||
raw_dtype: GGMLQuantizationType | None = None,
|
raw_dtype: GGMLQuantizationType | None = None,
|
||||||
) -> None:
|
) -> None:
|
||||||
if self.endianess == GGUFEndian.BIG:
|
if self.endianess == GGUFEndian.BIG:
|
||||||
|
@ -303,7 +262,7 @@ class GGUFWriter:
|
||||||
if pad != 0:
|
if pad != 0:
|
||||||
fp.write(bytes([0] * pad))
|
fp.write(bytes([0] * pad))
|
||||||
|
|
||||||
def write_tensor_data(self, tensor: np.ndarray[Any, Any] | LazyTensor) -> None:
|
def write_tensor_data(self, tensor: np.ndarray[Any, Any]) -> None:
|
||||||
if self.state is not WriterState.TI_DATA:
|
if self.state is not WriterState.TI_DATA:
|
||||||
raise ValueError(f'Expected output file to contain tensor info, got {self.state}')
|
raise ValueError(f'Expected output file to contain tensor info, got {self.state}')
|
||||||
|
|
||||||
|
@ -391,7 +350,7 @@ class GGUFWriter:
|
||||||
def add_name(self, name: str) -> None:
|
def add_name(self, name: str) -> None:
|
||||||
self.add_string(Keys.General.NAME, name)
|
self.add_string(Keys.General.NAME, name)
|
||||||
|
|
||||||
def add_quantization_version(self, quantization_version: GGMLQuantizationType) -> None:
|
def add_quantization_version(self, quantization_version: int) -> None:
|
||||||
self.add_uint32(
|
self.add_uint32(
|
||||||
Keys.General.QUANTIZATION_VERSION, quantization_version)
|
Keys.General.QUANTIZATION_VERSION, quantization_version)
|
||||||
|
|
||||||
|
|
gguf-py/gguf/lazy.py (new file, 225 lines)
@ -0,0 +1,225 @@
|
||||||
|
from __future__ import annotations
|
||||||
|
from abc import ABC, ABCMeta, abstractmethod
|
||||||
|
|
||||||
|
import logging
|
||||||
|
from typing import Any, Callable
|
||||||
|
from collections import deque
|
||||||
|
|
||||||
|
import numpy as np
|
||||||
|
from numpy.typing import DTypeLike
|
||||||
|
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class LazyMeta(ABCMeta):
|
||||||
|
|
||||||
|
def __new__(cls, name: str, bases: tuple[type, ...], namespace: dict[str, Any], **kwargs):
|
||||||
|
def __getattr__(self, __name: str) -> Any:
|
||||||
|
meta_attr = getattr(self._meta, __name)
|
||||||
|
if callable(meta_attr):
|
||||||
|
return type(self)._wrap_fn(
|
||||||
|
(lambda s, *args, **kwargs: getattr(s, __name)(*args, **kwargs)),
|
||||||
|
use_self=self,
|
||||||
|
)
|
||||||
|
elif isinstance(meta_attr, self._tensor_type):
|
||||||
|
# e.g. self.T with torch.Tensor should still be wrapped
|
||||||
|
return type(self)._wrap_fn(lambda s: getattr(s, __name))(self)
|
||||||
|
else:
|
||||||
|
# no need to wrap non-tensor properties,
|
||||||
|
# and they likely don't depend on the actual contents of the tensor
|
||||||
|
return meta_attr
|
||||||
|
|
||||||
|
namespace["__getattr__"] = __getattr__
|
||||||
|
|
||||||
|
# need to make a builder for the wrapped wrapper to copy the name,
|
||||||
|
# or else it fails with very cryptic error messages,
|
||||||
|
# because somehow the same string would end up in every closures
|
||||||
|
def mk_wrap(op_name: str, *, meta_noop: bool = False):
|
||||||
|
# need to wrap the wrapper to get self
|
||||||
|
def wrapped_special_op(self, *args, **kwargs):
|
||||||
|
return type(self)._wrap_fn(
|
||||||
|
getattr(type(self)._tensor_type, op_name),
|
||||||
|
meta_noop=meta_noop,
|
||||||
|
)(self, *args, **kwargs)
|
||||||
|
return wrapped_special_op
|
||||||
|
|
||||||
|
# special methods bypass __getattr__, so they need to be added manually
|
||||||
|
# ref: https://docs.python.org/3/reference/datamodel.html#special-lookup
|
||||||
|
# NOTE: doing this from a metaclass is very convenient
|
||||||
|
# TODO: make this even more comprehensive
|
||||||
|
for binary_op in (
|
||||||
|
"lt", "le", "eq", "ne", "ge", "gt", "not"
|
||||||
|
"abs", "add", "and", "floordiv", "invert", "lshift", "mod", "mul", "matmul",
|
||||||
|
"neg", "or", "pos", "pow", "rshift", "sub", "truediv", "xor",
|
||||||
|
"iadd", "iand", "ifloordiv", "ilshift", "imod", "imul", "ior", "irshift", "isub", "ixor",
|
||||||
|
"radd", "rand", "rfloordiv", "rmul", "ror", "rpow", "rsub", "rtruediv", "rxor",
|
||||||
|
):
|
||||||
|
attr_name = f"__{binary_op}__"
|
||||||
|
# the result of these operators usually has the same shape and dtype as the input,
|
||||||
|
# so evaluation on the meta tensor can be skipped.
|
||||||
|
namespace[attr_name] = mk_wrap(attr_name, meta_noop=True)
|
||||||
|
|
||||||
|
for special_op in (
|
||||||
|
"getitem", "setitem", "len",
|
||||||
|
):
|
||||||
|
attr_name = f"__{special_op}__"
|
||||||
|
namespace[attr_name] = mk_wrap(attr_name, meta_noop=False)
|
||||||
|
|
||||||
|
return super().__new__(cls, name, bases, namespace, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
# Tree of lazy tensors
|
||||||
|
class LazyBase(ABC, metaclass=LazyMeta):
|
||||||
|
_tensor_type: type
|
||||||
|
_meta: Any
|
||||||
|
_data: Any | None
|
||||||
|
_lazy: deque[LazyBase] # shared within a graph, to avoid deep recursion when making eager
|
||||||
|
_args: tuple
|
||||||
|
_func: Callable[[tuple], Any] | None
|
||||||
|
|
||||||
|
def __init__(self, *, meta: Any, data: Any | None = None, lazy: deque[LazyBase] | None = None, args: tuple = (), func: Callable[[tuple], Any] | None = None):
|
||||||
|
super().__init__()
|
||||||
|
self._meta = meta
|
||||||
|
self._data = data
|
||||||
|
self._lazy = lazy if lazy is not None else deque()
|
||||||
|
self._args = args
|
||||||
|
self._func = func
|
||||||
|
assert self._func is not None or self._data is not None
|
||||||
|
if self._data is None:
|
||||||
|
self._lazy.append(self)
|
||||||
|
|
||||||
|
def __init_subclass__(cls) -> None:
|
||||||
|
if "_tensor_type" not in cls.__dict__:
|
||||||
|
raise TypeError(f"property '_tensor_type' must be defined for {cls!r}")
|
||||||
|
return super().__init_subclass__()
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
def _recurse_apply(o: Any, fn: Callable[[Any], Any]) -> Any:
|
||||||
|
# TODO: dict and set
|
||||||
|
if isinstance(o, (list, tuple)):
|
||||||
|
L = []
|
||||||
|
for item in o:
|
||||||
|
L.append(LazyBase._recurse_apply(item, fn))
|
||||||
|
if isinstance(o, tuple):
|
||||||
|
L = tuple(L)
|
||||||
|
return L
|
||||||
|
elif isinstance(o, LazyBase):
|
||||||
|
return fn(o)
|
||||||
|
else:
|
||||||
|
return o
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def _wrap_fn(cls, fn: Callable, *, use_self: LazyBase | None = None, meta_noop: bool | DTypeLike = False) -> Callable[[Any], Any]:
|
||||||
|
def wrapped_fn(*args, **kwargs):
|
||||||
|
if kwargs is None:
|
||||||
|
kwargs = {}
|
||||||
|
args = ((use_self,) if use_self is not None else ()) + args
|
||||||
|
|
||||||
|
meta_args = LazyBase._recurse_apply(args, lambda t: t._meta)
|
||||||
|
|
||||||
|
if isinstance(meta_noop, bool) and not meta_noop:
|
||||||
|
try:
|
||||||
|
res = fn(*meta_args, **kwargs)
|
||||||
|
except NotImplementedError:
|
||||||
|
# running some operations on PyTorch's Meta tensors can cause this exception
|
||||||
|
res = None
|
||||||
|
else:
|
||||||
|
# some operators don't need to actually run on the meta tensors
|
||||||
|
assert len(args) > 0
|
||||||
|
res = args[0]
|
||||||
|
assert isinstance(res, cls)
|
||||||
|
res = res._meta
|
||||||
|
# allow operations to override the dtype
|
||||||
|
if meta_noop is not True:
|
||||||
|
res = cls.meta_with_dtype(res, meta_noop)
|
||||||
|
|
||||||
|
if isinstance(res, cls._tensor_type):
|
||||||
|
def collect_replace(t: LazyBase):
|
||||||
|
if collect_replace.shared_lazy is None:
|
||||||
|
collect_replace.shared_lazy = t._lazy
|
||||||
|
else:
|
||||||
|
collect_replace.shared_lazy.extend(t._lazy)
|
||||||
|
t._lazy = collect_replace.shared_lazy
|
||||||
|
|
||||||
|
# emulating a static variable
|
||||||
|
collect_replace.shared_lazy = None
|
||||||
|
|
||||||
|
LazyBase._recurse_apply(args, collect_replace)
|
||||||
|
|
||||||
|
shared_lazy = collect_replace.shared_lazy
|
||||||
|
|
||||||
|
return cls(meta=cls.eager_to_meta(res), lazy=shared_lazy, args=args, func=lambda a: fn(*a, **kwargs))
|
||||||
|
else:
|
||||||
|
del res # not needed
|
||||||
|
# non-tensor return likely relies on the contents of the args
|
||||||
|
# (e.g. the result of torch.equal)
|
||||||
|
eager_args = cls.to_eager(args)
|
||||||
|
return fn(*eager_args, **kwargs)
|
||||||
|
return wrapped_fn
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def to_eager(cls, t: Any) -> Any:
|
||||||
|
def simple_to_eager(_t: LazyBase) -> Any:
|
||||||
|
def already_eager_to_eager(_t: LazyBase) -> Any:
|
||||||
|
assert _t._data is not None
|
||||||
|
return _t._data
|
||||||
|
|
||||||
|
while _t._data is None:
|
||||||
|
lt = _t._lazy.popleft()
|
||||||
|
if lt._data is not None:
|
||||||
|
raise ValueError(f"{lt} did not belong in the lazy queue")
|
||||||
|
assert lt._func is not None
|
||||||
|
lt._args = cls._recurse_apply(lt._args, already_eager_to_eager)
|
||||||
|
lt._data = lt._func(lt._args)
|
||||||
|
# sanity check
|
||||||
|
assert lt._data.dtype == lt._meta.dtype
|
||||||
|
assert lt._data.shape == lt._meta.shape
|
||||||
|
|
||||||
|
return _t._data
|
||||||
|
|
||||||
|
# recurse into lists and/or tuples, keeping their structure
|
||||||
|
return cls._recurse_apply(t, simple_to_eager)
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def eager_to_meta(cls, t: Any) -> Any:
|
||||||
|
return cls.meta_with_dtype(t, t.dtype)
|
||||||
|
|
||||||
|
# must be overridden, meta tensor init is backend-specific
|
||||||
|
@classmethod
|
||||||
|
@abstractmethod
|
||||||
|
def meta_with_dtype(cls, m: Any, dtype: Any) -> Any: pass
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def from_eager(cls, t: Any) -> Any:
|
||||||
|
if type(t) is cls:
|
||||||
|
# already eager
|
||||||
|
return t
|
||||||
|
elif isinstance(t, cls._tensor_type):
|
||||||
|
return cls(meta=cls.eager_to_meta(t), data=t)
|
||||||
|
else:
|
||||||
|
return TypeError(f"{type(t)!r} is not compatible with {cls._tensor_type!r}")
|
||||||
|
|
||||||
|
|
||||||
|
class LazyNumpyTensor(LazyBase):
|
||||||
|
_tensor_type = np.ndarray
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def meta_with_dtype(cls, m: np.ndarray[Any, Any], dtype: DTypeLike) -> np.ndarray[Any, Any]:
|
||||||
|
# The initial idea was to use np.nan as the fill value,
|
||||||
|
# but non-float types like np.int16 can't use that.
|
||||||
|
# So zero it is.
|
||||||
|
cheat = np.zeros(1, dtype)
|
||||||
|
return np.lib.stride_tricks.as_strided(cheat, m.shape, (0 for _ in m.shape))
|
||||||
|
|
||||||
|
def astype(self, dtype, *args, **kwargs):
|
||||||
|
meta = type(self).meta_with_dtype(self._meta, dtype)
|
||||||
|
full_args = (self, dtype,) + args
|
||||||
|
# very important to pass the shared _lazy deque, or else there's an infinite loop somewhere.
|
||||||
|
return type(self)(meta=meta, args=full_args, lazy=self._lazy, func=(lambda a: a[0].astype(*a[1:], **kwargs)))
|
||||||
|
|
||||||
|
def tofile(self, *args, **kwargs):
|
||||||
|
eager = LazyNumpyTensor.to_eager(self)
|
||||||
|
return eager.tofile(*args, **kwargs)
|
||||||
|
|
||||||
|
# TODO: __array_function__
|
|
@ -137,6 +137,7 @@ class TensorNameMap:
|
||||||
"layers.{bid}.attention.wk", # llama-pth
|
"layers.{bid}.attention.wk", # llama-pth
|
||||||
"encoder.layer.{bid}.attention.self.key", # bert
|
"encoder.layer.{bid}.attention.self.key", # bert
|
||||||
"transformer.h.{bid}.attn.k_proj", # gpt-j
|
"transformer.h.{bid}.attn.k_proj", # gpt-j
|
||||||
|
"transformer.h.{bid}.attn.k", # refact
|
||||||
"model.layers.layers.{bid}.self_attn.k_proj", # plamo
|
"model.layers.layers.{bid}.self_attn.k_proj", # plamo
|
||||||
"model.layers.{bid}.attention.wk", # internlm2
|
"model.layers.{bid}.attention.wk", # internlm2
|
||||||
"transformer.decoder_layer.{bid}.multi_head_attention.key" # Grok
|
"transformer.decoder_layer.{bid}.multi_head_attention.key" # Grok
|
||||||
|
@ -148,6 +149,7 @@ class TensorNameMap:
|
||||||
"layers.{bid}.attention.wv", # llama-pth
|
"layers.{bid}.attention.wv", # llama-pth
|
||||||
"encoder.layer.{bid}.attention.self.value", # bert
|
"encoder.layer.{bid}.attention.self.value", # bert
|
||||||
"transformer.h.{bid}.attn.v_proj", # gpt-j
|
"transformer.h.{bid}.attn.v_proj", # gpt-j
|
||||||
|
"transformer.h.{bid}.attn.v", # refact
|
||||||
"model.layers.layers.{bid}.self_attn.v_proj", # plamo
|
"model.layers.layers.{bid}.self_attn.v_proj", # plamo
|
||||||
"model.layers.{bid}.attention.wv", # internlm2
|
"model.layers.{bid}.attention.wv", # internlm2
|
||||||
"transformer.decoder_layer.{bid}.multi_head_attention.value" # Grok
|
"transformer.decoder_layer.{bid}.multi_head_attention.value" # Grok
|
||||||
|
@ -229,6 +231,7 @@ class TensorNameMap:
|
||||||
"layers.{bid}.feed_forward.w3", # llama-pth
|
"layers.{bid}.feed_forward.w3", # llama-pth
|
||||||
"encoder.layer.{bid}.intermediate.dense", # bert
|
"encoder.layer.{bid}.intermediate.dense", # bert
|
||||||
"transformer.h.{bid}.mlp.fc_in", # gpt-j
|
"transformer.h.{bid}.mlp.fc_in", # gpt-j
|
||||||
|
"transformer.h.{bid}.mlp.linear_3", # refact
|
||||||
"language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon
|
"language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon
|
||||||
"model.layers.{bid}.mlp.dense_h_to_4h", # persimmon
|
"model.layers.{bid}.mlp.dense_h_to_4h", # persimmon
|
||||||
"transformer.h.{bid}.mlp.w1", # qwen
|
"transformer.h.{bid}.mlp.w1", # qwen
|
||||||
|
@ -240,6 +243,7 @@ class TensorNameMap:
|
||||||
"model.layers.{bid}.feed_forward.w3", # internlm2
|
"model.layers.{bid}.feed_forward.w3", # internlm2
|
||||||
"encoder.layers.{bid}.mlp.fc11", # nomic-bert
|
"encoder.layers.{bid}.mlp.fc11", # nomic-bert
|
||||||
"model.layers.{bid}.mlp.c_fc", # starcoder2
|
"model.layers.{bid}.mlp.c_fc", # starcoder2
|
||||||
|
"encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2
|
||||||
),
|
),
|
||||||
|
|
||||||
MODEL_TENSOR.FFN_UP_EXP: (
|
MODEL_TENSOR.FFN_UP_EXP: (
|
||||||
|
@ -266,6 +270,8 @@ class TensorNameMap:
|
||||||
"model.layers.layers.{bid}.mlp.gate_proj", # plamo
|
"model.layers.layers.{bid}.mlp.gate_proj", # plamo
|
||||||
"model.layers.{bid}.feed_forward.w1", # internlm2
|
"model.layers.{bid}.feed_forward.w1", # internlm2
|
||||||
"encoder.layers.{bid}.mlp.fc12", # nomic-bert
|
"encoder.layers.{bid}.mlp.fc12", # nomic-bert
|
||||||
|
"encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2
|
||||||
|
"transformer.h.{bid}.mlp.linear_1", # refact
|
||||||
),
|
),
|
||||||
|
|
||||||
MODEL_TENSOR.FFN_GATE_EXP: (
|
MODEL_TENSOR.FFN_GATE_EXP: (
|
||||||
|
@ -299,6 +305,7 @@ class TensorNameMap:
|
||||||
"model.layers.{bid}.feed_forward.w2", # internlm2
|
"model.layers.{bid}.feed_forward.w2", # internlm2
|
||||||
"encoder.layers.{bid}.mlp.fc2", # nomic-bert
|
"encoder.layers.{bid}.mlp.fc2", # nomic-bert
|
||||||
"model.layers.{bid}.mlp.c_proj", # starcoder2
|
"model.layers.{bid}.mlp.c_proj", # starcoder2
|
||||||
|
"encoder.layer.{bid}.mlp.wo", # jina-bert-v2
|
||||||
),
|
),
|
||||||
|
|
||||||
MODEL_TENSOR.FFN_DOWN_EXP: (
|
MODEL_TENSOR.FFN_DOWN_EXP: (
|
||||||
|
@ -317,6 +324,7 @@ class TensorNameMap:
|
||||||
"model.layers.{bid}.self_attn.q_layernorm", # persimmon
|
"model.layers.{bid}.self_attn.q_layernorm", # persimmon
|
||||||
"model.layers.{bid}.self_attn.q_norm", # cohere
|
"model.layers.{bid}.self_attn.q_norm", # cohere
|
||||||
"transformer.blocks.{bid}.attn.q_ln", # sea-lion
|
"transformer.blocks.{bid}.attn.q_ln", # sea-lion
|
||||||
|
"encoder.layer.{bid}.attention.self.layer_norm_q" # jina-bert-v2
|
||||||
),
|
),
|
||||||
|
|
||||||
MODEL_TENSOR.ATTN_K_NORM: (
|
MODEL_TENSOR.ATTN_K_NORM: (
|
||||||
|
@ -324,6 +332,7 @@ class TensorNameMap:
|
||||||
"model.layers.{bid}.self_attn.k_layernorm", # persimmon
|
"model.layers.{bid}.self_attn.k_layernorm", # persimmon
|
||||||
"model.layers.{bid}.self_attn.k_norm", # cohere
|
"model.layers.{bid}.self_attn.k_norm", # cohere
|
||||||
"transformer.blocks.{bid}.attn.k_ln", # sea-lion
|
"transformer.blocks.{bid}.attn.k_ln", # sea-lion
|
||||||
|
"encoder.layer.{bid}.attention.self.layer_norm_k" # jina-bert-v2
|
||||||
),
|
),
|
||||||
|
|
||||||
MODEL_TENSOR.ROPE_FREQS: (
|
MODEL_TENSOR.ROPE_FREQS: (
|
||||||
|
@ -334,6 +343,7 @@ class TensorNameMap:
|
||||||
"encoder.layer.{bid}.output.LayerNorm", # bert
|
"encoder.layer.{bid}.output.LayerNorm", # bert
|
||||||
"encoder.layers.{bid}.norm2", # nomic-bert
|
"encoder.layers.{bid}.norm2", # nomic-bert
|
||||||
"transformer.decoder_layer.{bid}.rms_norm_3", # Grok
|
"transformer.decoder_layer.{bid}.rms_norm_3", # Grok
|
||||||
|
"encoder.layer.{bid}.mlp.layernorm", # jina-bert-v2
|
||||||
),
|
),
|
||||||
|
|
||||||
MODEL_TENSOR.SSM_IN: (
|
MODEL_TENSOR.SSM_IN: (
|
||||||
|
|
llama.cpp (296 changes)
@ -205,6 +205,7 @@ enum llm_arch {
|
||||||
LLM_ARCH_REFACT,
|
LLM_ARCH_REFACT,
|
||||||
LLM_ARCH_BERT,
|
LLM_ARCH_BERT,
|
||||||
LLM_ARCH_NOMIC_BERT,
|
LLM_ARCH_NOMIC_BERT,
|
||||||
|
LLM_ARCH_JINA_BERT_V2,
|
||||||
LLM_ARCH_BLOOM,
|
LLM_ARCH_BLOOM,
|
||||||
LLM_ARCH_STABLELM,
|
LLM_ARCH_STABLELM,
|
||||||
LLM_ARCH_QWEN,
|
LLM_ARCH_QWEN,
|
||||||
|
@ -241,6 +242,7 @@ static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
|
||||||
{ LLM_ARCH_REFACT, "refact" },
|
{ LLM_ARCH_REFACT, "refact" },
|
||||||
{ LLM_ARCH_BERT, "bert" },
|
{ LLM_ARCH_BERT, "bert" },
|
||||||
{ LLM_ARCH_NOMIC_BERT, "nomic-bert" },
|
{ LLM_ARCH_NOMIC_BERT, "nomic-bert" },
|
||||||
|
{ LLM_ARCH_JINA_BERT_V2, "jina-bert-v2" },
|
||||||
{ LLM_ARCH_BLOOM, "bloom" },
|
{ LLM_ARCH_BLOOM, "bloom" },
|
||||||
{ LLM_ARCH_STABLELM, "stablelm" },
|
{ LLM_ARCH_STABLELM, "stablelm" },
|
||||||
{ LLM_ARCH_QWEN, "qwen" },
|
{ LLM_ARCH_QWEN, "qwen" },
|
||||||
|
@ -691,6 +693,25 @@ static const std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NA
|
||||||
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
|
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
LLM_ARCH_JINA_BERT_V2,
|
||||||
|
{
|
||||||
|
{ LLM_TENSOR_TOKEN_EMBD, "token_embd" },
|
||||||
|
{ LLM_TENSOR_TOKEN_EMBD_NORM, "token_embd_norm" },
|
||||||
|
{ LLM_TENSOR_TOKEN_TYPES, "token_types" },
|
||||||
|
{ LLM_TENSOR_ATTN_OUT_NORM, "blk.%d.attn_output_norm" },
|
||||||
|
{ LLM_TENSOR_ATTN_Q, "blk.%d.attn_q" },
|
||||||
|
{ LLM_TENSOR_ATTN_Q_NORM, "blk.%d.attn_q_norm" },
|
||||||
|
{ LLM_TENSOR_ATTN_K, "blk.%d.attn_k" },
|
||||||
|
{ LLM_TENSOR_ATTN_K_NORM, "blk.%d.attn_k_norm" },
|
||||||
|
{ LLM_TENSOR_ATTN_V, "blk.%d.attn_v" },
|
||||||
|
{ LLM_TENSOR_ATTN_OUT, "blk.%d.attn_output" },
|
||||||
|
{ LLM_TENSOR_LAYER_OUT_NORM, "blk.%d.layer_output_norm" },
|
||||||
|
{ LLM_TENSOR_FFN_DOWN, "blk.%d.ffn_down" },
|
||||||
|
{ LLM_TENSOR_FFN_GATE, "blk.%d.ffn_gate" },
|
||||||
|
{ LLM_TENSOR_FFN_UP, "blk.%d.ffn_up" },
|
||||||
|
},
|
||||||
|
},
|
||||||
{
|
{
|
||||||
LLM_ARCH_BLOOM,
|
LLM_ARCH_BLOOM,
|
||||||
{
|
{
|
||||||
|
@ -1845,7 +1866,7 @@ struct llama_hparams {
    float f_logit_scale = 0.0f;

    bool causal_attn = true;
-   bool use_alibi   = false; // currently, we need KQ_pos data for ALiBi-based models
+   bool use_alibi   = false;

    enum llama_pooling_type pooling_type = LLAMA_POOLING_TYPE_NONE;
    enum llama_rope_type    rope_type    = LLAMA_ROPE_TYPE_NONE;

@ -2317,7 +2338,6 @@ struct llama_context {
    struct ggml_tensor * inp_pos;     // I32 [n_batch]
    struct ggml_tensor * inp_out_ids; // I32 [n_outputs]
    struct ggml_tensor * inp_KQ_mask; // F32 [kv_size, n_batch]
-   struct ggml_tensor * inp_KQ_pos;  // F32 [n_kv]
    struct ggml_tensor * inp_K_shift; // I32 [kv_size]
    struct ggml_tensor * inp_mean;    // F32 [n_batch, n_batch]
    struct ggml_tensor * inp_cls;     // I32 [n_batch]
@ -3779,6 +3799,12 @@ static void llm_load_hparams(

    // get hparams kv
    ml.get_key(LLM_KV_VOCAB_SIZE, hparams.n_vocab, false) || ml.get_arr_n(LLM_KV_TOKENIZER_LIST, hparams.n_vocab);

+   // everything past this point is not vocab-related
+   if (hparams.vocab_only) {
+       return;
+   }
+
    ml.get_key(LLM_KV_CONTEXT_LENGTH,      hparams.n_ctx_train);
    ml.get_key(LLM_KV_EMBEDDING_LENGTH,    hparams.n_embd);
    ml.get_key(LLM_KV_FEED_FORWARD_LENGTH, hparams.n_ff);

@ -3860,7 +3886,7 @@ static void llm_load_hparams(
                switch (hparams.n_layer) {
                    case 22: model.type = e_model::MODEL_1B; break;
                    case 26: model.type = e_model::MODEL_3B; break;
-                   case 32: model.type = hparams.n_head == hparams.n_head_kv ? e_model::MODEL_7B : e_model::MODEL_8B; break; // LLaMa 8B v3 uses GQA
+                   case 32: model.type = hparams.n_vocab < 40000 ? e_model::MODEL_7B : e_model::MODEL_8B; break;
                    case 40: model.type = e_model::MODEL_13B; break;
                    case 48: model.type = e_model::MODEL_34B; break;
                    case 60: model.type = e_model::MODEL_30B; break;
@ -3962,6 +3988,19 @@ static void llm_load_hparams(
                        model.type = e_model::MODEL_335M; break; // bge-large
                }
            } break;
+       case LLM_ARCH_JINA_BERT_V2:
+           {
+               ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS,    hparams.f_norm_eps);
+               ml.get_key(LLM_KV_ATTENTION_CAUSAL,           hparams.causal_attn);
+               ml.get_key(LLM_KV_TOKENIZER_TOKEN_TYPE_COUNT, hparams.n_vocab_type);
+               ml.get_key(LLM_KV_POOLING_TYPE,               hparams.pooling_type);
+               hparams.f_max_alibi_bias = 8.0f;
+
+               switch (hparams.n_layer) {
+                   case 4:  model.type = e_model::MODEL_33M;  break; // jina-embeddings-small
+                   case 12: model.type = e_model::MODEL_137M; break; // jina-embeddings-base
+               }
+           } break;
        case LLM_ARCH_NOMIC_BERT:
            {
                ml.get_key(LLM_KV_ATTENTION_LAYERNORM_EPS, hparams.f_norm_eps);
@ -4383,7 +4422,9 @@ static void llm_load_vocab(
                tokenizer_pre == "starcoder") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_STARCODER;
            } else if (
-               tokenizer_pre == "gpt-2") {
+               tokenizer_pre == "gpt-2"   ||
+               tokenizer_pre == "jina-es" ||
+               tokenizer_pre == "jina-de") {
                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_GPT2;
            } else if (
                tokenizer_pre == "refact") {
@ -5242,6 +5283,50 @@ static bool llm_load_tensors(
                        layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i), {n_embd});
                    }
                } break;
+           case LLM_ARCH_JINA_BERT_V2:
+               {
+                   model.tok_embd  = ml.create_tensor(ctx_input,  tn(LLM_TENSOR_TOKEN_EMBD,      "weight"), {n_embd, n_vocab});      // word_embeddings
+                   model.type_embd = ml.create_tensor(ctx_input,  tn(LLM_TENSOR_TOKEN_TYPES,     "weight"), {n_embd, n_vocab_type}); // token_type_embeddings
+                   model.tok_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "weight"), {n_embd}); // LayerNorm
+                   model.tok_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD_NORM, "bias"),   {n_embd}); // LayerNorm bias
+
+                   for (int i = 0; i < n_layer; ++i) {
+                       ggml_context * ctx_layer = ctx_for_layer(i);
+                       ggml_context * ctx_split = ctx_for_layer_split(i);
+
+                       auto & layer = model.layers[i]; // JinaBertLayer
+
+                       layer.wq = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_Q, "weight", i), {n_embd, n_embd});
+                       layer.bq = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q, "bias", i),   {n_embd});
+
+                       layer.attn_q_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "weight", i), {n_embd}, false);
+                       layer.attn_q_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_Q_NORM, "bias", i),   {n_embd}, false);
+
+                       layer.wk = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_K, "weight", i), {n_embd, n_embd_gqa});
+                       layer.bk = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K, "bias", i),   {n_embd_gqa});
+
+                       layer.attn_k_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "weight", i), {n_embd}, false);
+                       layer.attn_k_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_K_NORM, "bias", i),   {n_embd}, false);
+
+                       layer.wv = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_V, "weight", i), {n_embd, n_embd_gqa});
+                       layer.bv = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_V, "bias", i),   {n_embd_gqa});
+
+                       layer.wo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "weight", i), {n_embd, n_embd}); // output_dense
+                       layer.bo = ml.create_tensor(ctx_split, tn(LLM_TENSOR_ATTN_OUT, "bias", i),   {n_embd});         // output_dense
+
+                       layer.attn_out_norm   = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "weight", i), {n_embd}); // output_norm
+                       layer.attn_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_ATTN_OUT_NORM, "bias", i),   {n_embd});
+
+                       layer.ffn_up   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_UP,   "weight", i), {n_embd, n_ff});
+                       layer.ffn_gate = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_GATE, "weight", i), {n_embd, n_ff});
+
+                       layer.ffn_down   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_FFN_DOWN, "weight", i), {n_ff, n_embd});
+                       layer.ffn_down_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_FFN_DOWN, "bias", i),   {n_embd});
+
+                       layer.layer_out_norm   = ml.create_tensor(ctx_split, tn(LLM_TENSOR_LAYER_OUT_NORM, "weight", i), {n_embd});
+                       layer.layer_out_norm_b = ml.create_tensor(ctx_layer, tn(LLM_TENSOR_LAYER_OUT_NORM, "bias", i),   {n_embd});
+                   }
+               } break;
            case LLM_ARCH_BLOOM:
                {
                    model.tok_embd = ml.create_tensor(ctx_input, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
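Note on the block above: the per-layer attn_q_norm / attn_k_norm tensors (and their biases) are requested with the trailing `false` argument, i.e. as optional tensors, so a checkpoint that lacks per-layer Q/K LayerNorm weights should still load; presumably this is what lets the different jina-embeddings exports share one loader path.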
@ -6318,7 +6403,7 @@ static struct ggml_tensor * llm_build_ffn(
         llm_ffn_gate_type   type_gate,
         const llm_build_cb & cb,
                        int   il) {
-   struct ggml_tensor * tmp = ggml_mul_mat(ctx, up, cur);
+   struct ggml_tensor * tmp = up ? ggml_mul_mat(ctx, up, cur) : cur;
    cb(tmp, "ffn_up", il);

    if (up_b) {
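With this change llm_build_ffn tolerates a null `up` projection, and the new JINA_BERT_V2 graph further down calls it with LLM_FFN_GELU + LLM_FFN_PAR, i.e. a gate branch evaluated in parallel with the up branch. The following standalone sketch (toy std::vector math, made-up weights, not the ggml graph code) shows the shape of that parallel gated FFN: down(gelu(gate(x)) * up(x)).

// Toy illustration of a parallel gated FFN (GELU on the gate, elementwise product
// with the up branch, then the down projection). Weights/dimensions are arbitrary.
#include <cmath>
#include <vector>

using Mat = std::vector<std::vector<float>>;

static std::vector<float> matvec(const Mat & W, const std::vector<float> & x) {
    std::vector<float> y(W.size(), 0.0f);
    for (size_t r = 0; r < W.size(); ++r)
        for (size_t c = 0; c < x.size(); ++c)
            y[r] += W[r][c] * x[c];
    return y;
}

static float gelu(float v) {
    return 0.5f * v * (1.0f + std::tanh(0.7978845608f * (v + 0.044715f * v * v * v)));
}

std::vector<float> ffn_par_gelu(const Mat & Wup, const Mat & Wgate, const Mat & Wdown,
                                const std::vector<float> & x) {
    std::vector<float> up   = matvec(Wup,   x);  // "ffn_up"
    std::vector<float> gate = matvec(Wgate, x);  // "ffn_gate", parallel branch
    for (size_t i = 0; i < up.size(); ++i)
        up[i] *= gelu(gate[i]);                  // activation on the gate only
    return matvec(Wdown, up);                    // "ffn_down"
}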
@ -6500,7 +6585,6 @@ static struct ggml_tensor * llm_build_kqv(
         struct ggml_tensor * wo_b,
         struct ggml_tensor * q_cur,
         struct ggml_tensor * kq_mask,
-        struct ggml_tensor * kq_pos,
                    int32_t   n_tokens,
                    int32_t   n_kv,
                    float     kq_scale,

@ -6530,10 +6614,6 @@ static struct ggml_tensor * llm_build_kqv(
        GGML_UNUSED(model);
        GGML_UNUSED(n_ctx);

-       // note: if this assert triggers, then some check has failed earlier
-       // the idea is to detect during context creation that ALiBi would be used and disable Flash Attention
-       GGML_ASSERT(kq_pos == nullptr && "ALiBi is not yet supported with Flash Attention");
-
        // split cached v into n_head heads (not transposed)
        struct ggml_tensor * v =
            ggml_view_3d(ctx, kv.v_l[il],

@ -6543,7 +6623,7 @@ static struct ggml_tensor * llm_build_kqv(
                    0);
        cb(v, "v", il);

-       cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale);
+       cur = ggml_flash_attn_ext(ctx, q, k, v, kq_mask, kq_scale, hparams.f_max_alibi_bias);

        if (model.arch == LLM_ARCH_PHI2 || model.arch == LLM_ARCH_PHI3) {
            ggml_flash_attn_ext_set_prec(cur, GGML_PREC_F32);
@ -6574,28 +6654,8 @@ static struct ggml_tensor * llm_build_kqv(
            kq = ggml_scale(ctx, kq, 30);
        }

-#if defined(GGML_USE_KOMPUTE)
-#pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Kompute")
-#pragma message("      Falling back to ggml_alibi(). Will become an error in Mar 2024")
-#pragma message("ref:  https://github.com/ggerganov/llama.cpp/pull/5488")
-        if (hparams.use_alibi) {
-            kq = ggml_scale(ctx, kq, kq_scale);
-            cb(kq, "kq_scaled", il);
-
-            kq = ggml_alibi(ctx, kq, /*n_past*/ 0, n_head, hparams.f_max_alibi_bias);
-            cb(kq, "kq_scaled_alibi", il);
-
-            kq = ggml_add(ctx, kq, kq_mask);
-            cb(kq, "kq_masked", il);
-
-            kq = ggml_soft_max(ctx, kq);
-            cb(kq, "kq_soft_max", il);
-        } else
-#endif
-        {
-            kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_pos, kq_scale, hparams.f_max_alibi_bias);
+        kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, hparams.f_max_alibi_bias);
            cb(kq, "kq_soft_max_ext", il);
-        }

        GGML_ASSERT(kv.size == n_ctx);
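With kq_pos gone, the ALiBi bias is no longer injected via a separate positions tensor: ggml_soft_max_ext (and ggml_flash_attn_ext above) derive it from f_max_alibi_bias and the mask, whose entries now carry relative distances written by llama_set_inputs further down. Roughly, for head h of H heads:

    attn[i][j] = softmax_j( kq_scale * q_i.k_j + m_h * M[i][j] )
    M[i][j]    = -|pos_i - pos_j|   if key j is visible to query i, otherwise -INF

where m_h is a per-head slope derived from max_bias; in the original ALiBi formulation m_h = 2^(-8h/H). The exact slope schedule used inside ggml is not shown in this diff, so treat the formula as a sketch of the intent rather than the implementation.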
@ -6645,7 +6705,6 @@ static struct ggml_tensor * llm_build_kv(
         struct ggml_tensor * v_cur,
         struct ggml_tensor * q_cur,
         struct ggml_tensor * kq_mask,
-        struct ggml_tensor * kq_pos,
                    int32_t   n_tokens,
                    int32_t   kv_head,
                    int32_t   n_kv,

@ -6664,7 +6723,7 @@ static struct ggml_tensor * llm_build_kv(
    struct ggml_tensor * cur;

    cur = llm_build_kqv(ctx, model, hparams, cparams, kv, graph, wo, wo_b,
-           q_cur, kq_mask, kq_pos, n_tokens, n_kv, kq_scale, cb, il);
+           q_cur, kq_mask, n_tokens, n_kv, kq_scale, cb, il);
    cb(cur, "kqv_out", il);

    return cur;

@ -6776,7 +6835,6 @@ struct llm_build_context {
        lctx.inp_pos     = nullptr;
        lctx.inp_out_ids = nullptr;
        lctx.inp_KQ_mask = nullptr;
-       lctx.inp_KQ_pos  = nullptr;
        lctx.inp_K_shift = nullptr;
        lctx.inp_mean    = nullptr;
        lctx.inp_cls     = nullptr;

@ -6932,19 +6990,6 @@ struct llm_build_context {
        return flash_attn ? ggml_cast(ctx0, lctx.inp_KQ_mask, GGML_TYPE_F16) : lctx.inp_KQ_mask;
    }

-   struct ggml_tensor * build_inp_KQ_pos(bool causal = true) {
-       if (causal) {
-           lctx.inp_KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n_kv);
-       } else {
-           // TODO: this will be needed for ALiBi-based BERT models
-           //       https://github.com/ggerganov/llama.cpp/pull/6826
-           lctx.inp_KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n_tokens);
-       }
-       cb(lctx.inp_KQ_pos, "KQ_pos", -1);
-       ggml_set_input(lctx.inp_KQ_pos);
-       return flash_attn ? ggml_cast(ctx0, lctx.inp_KQ_pos, GGML_TYPE_F16) : lctx.inp_KQ_pos;
-   }
-
    struct ggml_tensor * build_inp_mean() {
        lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
        cb(lctx.inp_mean, "inp_mean", -1);
@ -7050,7 +7095,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -7143,9 +7188,6 @@ struct llm_build_context {
        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

-       // positions of the tokens in the KV cache
-       struct ggml_tensor * KQ_pos = build_inp_KQ_pos();
-
        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

@ -7190,7 +7232,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -7260,9 +7302,6 @@ struct llm_build_context {
        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

-       // positions of the tokens in the KV cache
-       struct ggml_tensor * KQ_pos = build_inp_KQ_pos();
-
        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

@ -7297,7 +7336,7 @@ struct llm_build_context {
                cb(Kcur, "Kcur", il);
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -7417,7 +7456,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -7542,7 +7581,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
            }

@ -7694,7 +7733,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -7806,7 +7845,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -8010,7 +8049,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Q, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Q, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -8076,9 +8115,6 @@ struct llm_build_context {
        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

-       // positions of the tokens in the KV cache
-       struct ggml_tensor * KQ_pos = build_inp_KQ_pos();
-
        for (int il = 0; il < n_layer; ++il) {
            struct ggml_tensor * inpSA = inpL;

@ -8106,7 +8142,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }
@ -8168,8 +8204,11 @@ struct llm_build_context {
        struct ggml_tensor * cur;
        struct ggml_tensor * inpL;
+       struct ggml_tensor * inp_pos = nullptr;

-       struct ggml_tensor * inp_pos  = build_inp_pos();
+       if (model.arch != LLM_ARCH_JINA_BERT_V2) {
+           inp_pos = build_inp_pos();
+       }
        struct ggml_tensor * inp_mean = build_inp_mean();
        struct ggml_tensor * inp_cls  = build_inp_cls();

@ -8200,13 +8239,26 @@ struct llm_build_context {
            struct ggml_tensor * Vcur;

            // self-attention
-           if (model.arch == LLM_ARCH_BERT) {
+           if (model.arch == LLM_ARCH_BERT || model.arch == LLM_ARCH_JINA_BERT_V2) {
                Qcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wq, cur), model.layers[il].bq);
                cb(Qcur, "Qcur", il);

+               if (model.layers[il].attn_q_norm) {
+                   Qcur = llm_build_norm(ctx0, Qcur, hparams,
+                           model.layers[il].attn_q_norm,
+                           model.layers[il].attn_q_norm_b,
+                           LLM_NORM, cb, il);
+               }
+
                Kcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wk, cur), model.layers[il].bk);
                cb(Kcur, "Kcur", il);

+               if (model.layers[il].attn_k_norm) {
+                   Kcur = llm_build_norm(ctx0, Kcur, hparams,
+                           model.layers[il].attn_k_norm,
+                           model.layers[il].attn_k_norm_b,
+                           LLM_NORM, cb, il);
+               }
                Vcur = ggml_add(ctx0, ggml_mul_mat(ctx0, model.layers[il].wv, cur), model.layers[il].bv);
                cb(Vcur, "Vcur", il);
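For readers unfamiliar with the optional Q/K normalization used here: llm_build_norm with LLM_NORM applies a standard LayerNorm to the freshly projected query/key activations. A standalone toy version over a single vector (not the ggml kernel; the eps value is illustrative) looks like this:

// Toy LayerNorm: y = (x - mean) / sqrt(var + eps) * gamma + beta,
// where gamma/beta play the role of attn_q_norm / attn_q_norm_b.
#include <cmath>
#include <vector>

std::vector<float> layer_norm(const std::vector<float> & x,
                              const std::vector<float> & gamma,
                              const std::vector<float> & beta,
                              float eps = 1e-5f) {
    float mean = 0.0f, var = 0.0f;
    for (float v : x) mean += v;
    mean /= (float) x.size();
    for (float v : x) var += (v - mean) * (v - mean);
    var /= (float) x.size();

    std::vector<float> y(x.size());
    for (size_t i = 0; i < x.size(); ++i) {
        y[i] = (x[i] - mean) / std::sqrt(var + eps) * gamma[i] + beta[i];
    }
    return y;
}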
@ -8246,7 +8298,7 @@ struct llm_build_context {
                struct ggml_tensor * kq = ggml_mul_mat(ctx0, k, q);
                cb(kq, "kq", il);

-               kq = ggml_soft_max_ext(ctx0, kq, KQ_mask, nullptr, 1.0f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias);
+               kq = ggml_soft_max_ext(ctx0, kq, KQ_mask, 1.0f/sqrtf(float(n_embd_head)), hparams.f_max_alibi_bias);
                cb(kq, "kq_soft_max_ext", il);

                struct ggml_tensor * v = ggml_cont(ctx0, ggml_transpose(ctx0, ggml_reshape_2d(ctx0, Vcur, n_embd_gqa, n_tokens)));

@ -8297,6 +8349,13 @@ struct llm_build_context {
                        model.layers[il].ffn_down, model.layers[il].ffn_down_b,
                        NULL,
                        LLM_FFN_GELU, LLM_FFN_SEQ, cb, il);
+           } else if (model.arch == LLM_ARCH_JINA_BERT_V2) {
+               cur = llm_build_ffn(ctx0, cur,
+                       model.layers[il].ffn_up,   NULL,
+                       model.layers[il].ffn_gate, NULL,
+                       model.layers[il].ffn_down, model.layers[il].ffn_down_b,
+                       NULL,
+                       LLM_FFN_GELU, LLM_FFN_PAR, cb, il);
            } else {
                cur = llm_build_ffn(ctx0, cur,
                        model.layers[il].ffn_up, NULL,
@ -8363,9 +8422,6 @@ struct llm_build_context {
        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

-       // positions of the tokens in the KV cache
-       struct ggml_tensor * KQ_pos = build_inp_KQ_pos();
-
        inpL = llm_build_norm(ctx0, inpL, hparams,
                model.tok_norm,
                model.tok_norm_b,

@ -8399,7 +8455,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -8464,9 +8520,6 @@ struct llm_build_context {
        // KQ_mask (mask for 1 head, it will be broadcasted to all heads)
        struct ggml_tensor * KQ_mask = build_inp_KQ_mask();

-       // positions of the tokens in the KV cache
-       struct ggml_tensor * KQ_pos = build_inp_KQ_pos();
-
        if (model.pos_embd) {
            // inp_pos - contains the positions
            struct ggml_tensor * inp_pos = build_inp_pos();

@ -8530,13 +8583,13 @@ struct llm_build_context {
                    cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                            model.layers[il].wo, model.layers[il].bo,
-                           Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                           Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                } else {
                    Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);

                    cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                            model.layers[il].wo, model.layers[il].bo,
-                           Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                           Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
                }
            }

@ -8680,7 +8733,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -8798,7 +8851,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -8911,7 +8964,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -9025,7 +9078,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -9180,7 +9233,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
            }

@ -9297,7 +9350,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
            }

@ -9410,7 +9463,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }
            struct ggml_tensor * sa_out = cur;

@ -9513,7 +9566,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -9620,7 +9673,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -9736,7 +9789,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -9853,7 +9906,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -9983,7 +10036,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -10104,7 +10157,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, NULL,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f, cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f, cb, il);
            }

@ -10223,7 +10276,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -10513,7 +10566,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, model.layers[il].bo,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }

@ -10644,7 +10697,7 @@ struct llm_build_context {
                cur = llm_build_kv(ctx0, model, hparams, cparams, kv_self, gf,
                        model.layers[il].wo, nullptr,
-                       Kcur, Vcur, Qcur, KQ_mask, nullptr, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
+                       Kcur, Vcur, Qcur, KQ_mask, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
            }
@ -10825,6 +10878,7 @@ static struct ggml_cgraph * llama_build_graph(
            {
                result = llm.build_refact();
            } break;
        case LLM_ARCH_BERT:
+       case LLM_ARCH_JINA_BERT_V2:
        case LLM_ARCH_NOMIC_BERT:
            {
                result = llm.build_bert();
@ -11031,12 +11085,22 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
                    float f;
                    if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
                        f = -INFINITY;
+                   } else {
+                       if (hparams.use_alibi) {
+                           f = -fabs(lctx.kv_self.cells[i].pos - pos);
                        } else {
                            f = 0.0f;
                        }
+                   }
                    data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
                }
            }
+
+           for (int i = n_tokens; i < GGML_PAD(n_tokens, GGML_KQ_MASK_PAD); ++i) {
+               for (int j = 0; j < n_kv; ++j) {
+                   data[h*(n_kv*n_tokens) + i*n_kv + j] = -INFINITY;
+               }
+           }
        }
    } else {
        // when using kv cache, the mask needs to match the kv cache size

@ -11055,7 +11119,11 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
                    float f = -INFINITY;
                    for (int s = 0; s < batch.n_seq_id[i]; ++s) {
                        if (batch.seq_id[i][s] == seq_id) {
+                           if (hparams.use_alibi) {
+                               f = -fabs(batch.pos[i] - batch.pos[j]);
+                           } else {
                                f = 0.0f;
+                           }
                            break;
                        }
                    }

@ -11071,21 +11139,6 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
            }
        }

-       // ALiBi requires the KQ_pos tensor to provide the sequence position of each token in the batch
-       // this allows to process multiple sequences in parallel with ALiBi-based models
-       if (hparams.use_alibi) {
-           const int64_t n_kv = kv_self.n;
-
-           GGML_ASSERT(lctx.inp_KQ_pos);
-           GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_pos->buffer));
-
-           float * data = (float *) lctx.inp_KQ_pos->data;
-
-           for (int i = 0; i < n_kv; ++i) {
-               data[i] = float(lctx.kv_self.cells[i].pos);
-           }
-       }
-
        if (cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
            const int64_t n_tokens = batch.n_tokens;
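The two mask-fill changes above are the host side of the ALiBi rework: instead of a separate KQ_pos tensor, the relative distance between query and key positions is written straight into the KQ mask for ALiBi models. A minimal standalone sketch of the same logic (toy causal setup, no KV cache or multi-sequence handling):

// Toy KQ-mask fill: -INF hides non-visible keys; visible entries hold 0 for
// plain attention or -|pos_i - pos_j| when the model uses ALiBi.
#include <cmath>
#include <limits>
#include <vector>

std::vector<float> build_kq_mask(const std::vector<int> & pos, bool use_alibi) {
    const float NEG_INF = -std::numeric_limits<float>::infinity();
    const int n = (int) pos.size();
    std::vector<float> mask(n * n, NEG_INF);
    for (int i = 0; i < n; ++i) {        // query token
        for (int j = 0; j <= i; ++j) {   // causally visible key token
            mask[i*n + j] = use_alibi ? -std::fabs((float)(pos[i] - pos[j])) : 0.0f;
        }
    }
    return mask;
}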
@ -12200,13 +12253,14 @@ struct llm_tokenizer_bpe {

    void tokenize(const std::string & text, std::vector<llama_vocab::id> & output) {
        int final_prev_index = -1;
+       bool ignore_merges = false;

        std::vector<std::string> word_collection;
        switch (vocab.type) {
            case LLAMA_VOCAB_TYPE_BPE:
                switch (vocab.type_pre) {
                    case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
-                   case LLAMA_VOCAB_PRE_TYPE_DBRX:
+                       ignore_merges = true;
                        word_collection = unicode_regex_split(text, {
                            // original regex from tokenizer.json
                            //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",

@ -12215,6 +12269,12 @@ struct llm_tokenizer_bpe {
                            "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
                        });
                        break;
+                   case LLAMA_VOCAB_PRE_TYPE_DBRX:
+                       word_collection = unicode_regex_split(text, {
+                           // same as llama3
+                           "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+                       });
+                       break;
                    case LLAMA_VOCAB_PRE_TYPE_DEEPSEEK_LLM:
                        word_collection = unicode_regex_split(text, {
                            "[\r\n]",

@ -12298,6 +12358,11 @@ struct llm_tokenizer_bpe {
            int index = 0;
            size_t offset = 0;

+           if (ignore_merges && vocab.token_to_id.find(word) != vocab.token_to_id.end()) {
+               symbols.emplace_back(llm_symbol{-1, -1, word.c_str(), word.size()});
+               offset = word.size();
+           }
+
            while (offset < word.size()) {
                llm_symbol sym;
                size_t char_len = std::min(word.size() - offset, (size_t) ::utf8_len(word[offset]));
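The effect of the ignore_merges flag (now enabled only for the Llama 3 pre-tokenizer, with DBRX split out into its own case that keeps the same regex but not the flag): when a whole pre-token is already a single vocabulary entry, it is emitted as one symbol instead of being rebuilt from bytes by the merge loop. A toy standalone illustration (made-up vocabulary, merge path elided):

// Sketch of the ignore_merges short-circuit, not the llama.cpp implementation.
#include <string>
#include <unordered_map>
#include <vector>

std::vector<int> tokenize_word(const std::string & word,
                               const std::unordered_map<std::string, int> & token_to_id,
                               bool ignore_merges) {
    if (ignore_merges) {
        auto it = token_to_id.find(word);
        if (it != token_to_id.end()) {
            return { it->second };  // the whole word is one token, skip byte-pair merging
        }
    }
    std::vector<int> out;
    // ... the usual byte-pair merge loop would run here ...
    return out;
}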
@ -12752,7 +12817,10 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
                    }
                }

-               GGML_ASSERT(vocab.special_add_eos != 1);
+               if (add_special && vocab.special_add_eos == 1) {
+                   GGML_ASSERT(vocab.special_add_eos != -1);
+                   output.push_back(vocab.special_eos_id);
+               }
            } break;
        case LLAMA_VOCAB_TYPE_WPM:
            {
@ -15509,11 +15577,6 @@ struct llama_context * llama_new_context_with_model(
        }
    }

-   if (cparams.flash_attn && hparams.use_alibi) {
-       LLAMA_LOG_WARN("%s: flash_attn is not yet compatible with ALiBi - forcing off\n", __func__);
-       cparams.flash_attn = false;
-   }
-
    if (cparams.flash_attn && model->arch == LLM_ARCH_GROK) {
        LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__);
        cparams.flash_attn = false;

@ -15808,6 +15871,7 @@ enum llama_rope_type llama_rope_type(const struct llama_model * model) {
        case LLM_ARCH_REFACT:
        case LLM_ARCH_BLOOM:
        case LLM_ARCH_MAMBA:
+       case LLM_ARCH_JINA_BERT_V2:
            return LLAMA_ROPE_TYPE_NONE;

        // use what we call a normal RoPE, operating on pairs of consecutive head values
@ -104,3 +104,5 @@ __ggml_vocab_test__

 🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ 🦙🦙 3 33 333 3333 33333 333333 3333333 33333333 3.3 3..3 3...3 កាន់តែពិសេសអាច😁 ?我想在apple工作1314151天~ ------======= нещо на Български ''''''```````""""......!!!!!!?????? I've been 'told he's there, 'RE you sure? 'M not sure I'll make it, 'D you like some tea? We'Ve a'lL
 __ggml_vocab_test__
+Việt
+__ggml_vocab_test__

@ -41,3 +41,4 @@
 8765 8765 1644
 8765 8765 8765
 198 4815 15073 66597 8004 1602 2355 79772 11187 9468 248 222 320 8416 8 27623 114 102470 9468 234 104 31643 320 36773 100166 98634 8 26602 227 11410 99 247 9468 99 247 220 18 220 1644 220 8765 220 8765 18 220 8765 1644 220 8765 8765 220 8765 8765 18 220 8765 8765 1644 220 18 13 18 220 18 497 18 220 18 1131 18 220 21549 222 98629 241 45358 233 21549 237 45358 224 21549 244 21549 115 21549 253 45358 223 21549 253 21549 95 98629 227 76460 223 949 37046 101067 19000 23182 102301 9263 18136 16 36827 21909 56560 54337 19175 102118 13373 64571 34694 3114 112203 80112 3436 106451 14196 14196 74694 3089 3089 29249 17523 3001 27708 7801 358 3077 1027 364 83 820 568 596 1070 11 364 793 499 2771 30 364 44 539 2771 358 3358 1304 433 11 364 35 499 1093 1063 15600 30 1226 6 43712 264 64966 43
+101798
@ -325,8 +325,12 @@ table = []
 for row in rows_show:
     n_prompt = int(row[-4])
     n_gen    = int(row[-3])
-    assert n_prompt == 0 or n_gen == 0
-    test_name = f"tg{n_gen}" if n_prompt == 0 else f"pp{n_prompt}"
+    if n_prompt != 0 and n_gen == 0:
+        test_name = f"pp{n_prompt}"
+    elif n_prompt == 0 and n_gen != 0:
+        test_name = f"tg{n_gen}"
+    else:
+        test_name = f"pp{n_prompt}+tg{n_gen}"
     #           Regular columns    test name    avg t/s values    Speedup
     #           VVVVVVVVVVVVV      VVVVVVVVV    VVVVVVVVVVVVVV    VVVVVVV
     table.append(list(row[:-4]) + [test_name] + list(row[-2:]) + [float(row[-1]) / float(row[-2])])
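In other words, a benchmark row that exercised both phases, say n_prompt = 512 and n_gen = 128 (values here are only an example), is now labelled pp512+tg128 instead of tripping the old assert, while prompt-only and generation-only rows keep the familiar pp512 / tg128 names.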
117  scripts/debug-test.sh  Executable file

@ -0,0 +1,117 @@
#!/bin/bash
test_suite=${1:-}
test_number=${2:-}

PROG=${0##*/}
build_dir="build-ci-debug"

if [ x"$1" = x"-h" ] || [ x"$1" = x"--help" ]; then
    echo "Usage: $PROG [OPTION]... <test_regex> (test_number)"
    echo "Debug specific ctest program."
    echo
    echo "Options:"
    echo "  -h, --help       Display this help and exit"
    echo
    echo "Arguments:"
    echo "  <test_regex>     (Mandatory) Supply one regex to the script to filter tests"
    echo "  (test_number)    (Optional) Test number to run a specific test"
    echo
    echo "Example:"
    echo "  $PROG test-tokenizer"
    echo "  $PROG test-tokenizer 3"
    echo
    exit 0
fi

# Function to select and debug a test
function select_test() {
    test_suite=${1:-test}
    test_number=${2:-}

    # Sanity check that tests are detected
    printf "\n\nGathering tests that fit REGEX: ${test_suite} ...\n"
    tests=($(ctest -R ${test_suite} -V -N | grep -E " +Test +#[0-9]+*" | cut -d':' -f2 | awk '{$1=$1};1'))
    if [ ${#tests[@]} -eq 0 ]
    then
        echo "No tests available... check your compilation process..."
        echo "Exiting."
        exit 1
    fi

    if [ -z $test_number ]
    then
        # List out available tests
        printf "Which test would you like to debug?\n"
        id=0
        for s in "${tests[@]}"
        do
            echo "Test# ${id}"
            echo "  $s"
            ((id++))
        done

        # Prompt user which test they wanted to run
        printf "\nRun test#? "
        read test_number
    else
        printf "\nUser Already Requested #${test_number}"
    fi

    # Start GDB with the requested test binary and arguments
    printf "Debugging(GDB) test: ${tests[test_number]}\n"
    # Change IFS (Internal Field Separator)
    sIFS=$IFS
    IFS=$'\n'

    # Get test args
    gdb_args=($(ctest -R ${test_suite} -V -N | grep "Test command" | cut -d':' -f3 | awk '{$1=$1};1' ))
    IFS=$sIFS
    printf "Debug arguments: ${gdb_args[test_number]}\n\n"

    # Expand paths if needed
    args=()
    for x in $(echo ${gdb_args[test_number]} | sed -e 's/"\/\<//' -e 's/\>"//')
    do
        args+=($(echo $x | sed -e 's/.*\/..\//..\//'))
    done

    # Execute debugger
    echo "gdb args: ${args[@]}"
    gdb --args ${args[@]}
}

# Step 0: Check the args
if [ -z "$test_suite" ]
then
    echo "Usage: $PROG [OPTION]... <test_regex> (test_number)"
    echo "Supply one regex to the script to filter tests,"
    echo "and optionally a test number to run a specific test."
    echo "Use --help flag for full instructions"
    exit 1
fi

# Step 1: Reset and Setup folder context
## Sanity check that we are actually in a git repo
repo_root=$(git rev-parse --show-toplevel)
if [ ! -d "$repo_root" ]; then
    echo "Error: Not in a Git repository."
    exit 1
fi

## Reset folder to root context of git repo
pushd "$repo_root" || exit 1

## Create and enter build directory
rm -rf "$build_dir" && mkdir "$build_dir" || exit 1

# Step 2: Setup Build Environment and Compile Test Binaries
cmake -B "./$build_dir" -DCMAKE_BUILD_TYPE=Debug -DLLAMA_CUDA=1 -DLLAMA_FATAL_WARNINGS=ON || exit 1
pushd "$build_dir" && make -j || exit 1

# Step 3: Debug the Test
select_test "$test_suite" "$test_number"

# Step 4: Return to the directory from which the user ran the command.
popd || exit 1
popd || exit 1
popd || exit 1
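Per the script's built-in help, it is invoked with a ctest name regex and an optional test index, e.g. `./scripts/debug-test.sh test-tokenizer 3`. It wipes and reconfigures a Debug build in build-ci-debug (note the hard-coded -DLLAMA_CUDA=1 and -DLLAMA_FATAL_WARNINGS=ON), builds the tests, then launches the selected ctest binary under gdb with the arguments ctest would have used.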
@ -1 +1 @@
-98875cdb7e9ceeb726d1c196d2fecb3cbb59b93a
+30f54cbb3ada3e4c5bc6924de3e5918e5be4ff11
@ -92,7 +92,7 @@ target_link_libraries(test-tokenizer-1-bpe PRIVATE common)
 install(TARGETS test-tokenizer-1-bpe RUNTIME)

 # TODO: disabled due to slowness
-#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf --ignore-merges)
 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
@ -1111,11 +1111,7 @@ struct test_soft_max : public test_case {
|
||||||
if (this->mask) {
|
if (this->mask) {
|
||||||
mask = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne[0], ne[1]);
|
mask = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, ne[0], ne[1]);
|
||||||
}
|
}
|
||||||
ggml_tensor * pos = nullptr;
|
ggml_tensor * out = ggml_soft_max_ext(ctx, a, mask, scale, max_bias);
|
||||||
if (max_bias > 0.0f) {
|
|
||||||
pos = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, ne[0]);
|
|
||||||
}
|
|
||||||
ggml_tensor * out = ggml_soft_max_ext(ctx, a, mask, pos, scale, max_bias);
|
|
||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@@ -1490,23 +1486,25 @@ struct test_flash_attn_ext : public test_case {
     const int64_t kv; // kv size
     const int64_t nb; // batch size

+    const float max_bias; // ALiBi
+
     std::string vars() override {
-        return VARS_TO_STR4(hs, nh, kv, nb);
+        return VARS_TO_STR5(hs, nh, kv, nb, max_bias);
     }

     double max_nmse_err() override {
         return 5e-4;
     }

-    test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8)
-        : hs(hs), nh(nh), kv(kv), nb(nb) {}
+    test_flash_attn_ext(int64_t hs = 128, int64_t nh = 32, int64_t kv = 96, int64_t nb = 8, float max_bias = 0.0f)
+        : hs(hs), nh(nh), kv(kv), nb(nb), max_bias(max_bias) {}

     ggml_tensor * build_graph(ggml_context * ctx) override {
         ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs, nb, nh, 1);
         ggml_tensor * k = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1);
         ggml_tensor * v = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, hs, kv, nh, 1);
         ggml_tensor * mask = ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1);
-        ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, mask, 1.0f/sqrtf(hs));
+        ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, mask, 1.0f/sqrtf(hs), max_bias);
         return out;
     }
 };
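With the extra constructor parameter, ALiBi-enabled flash-attention cases can be registered the same way as the existing ones. A small sketch, not part of the patch, assuming the test_cases vector and the struct declared in this file; the concrete values mirror the sweep added in the test_backend hunk further down.

// Sketch only: (hs, nh, kv, nb, max_bias) — max_bias = 0.0f preserves the old behaviour
test_cases.emplace_back(new test_flash_attn_ext(128, 32, 512, 8, 0.0f)); // no ALiBi
test_cases.emplace_back(new test_flash_attn_ext(128, 32, 512, 8, 8.0f)); // ALiBi with max_bias = 8
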
@@ -1611,7 +1609,7 @@ public:

         struct ggml_tensor * kq = ggml_mul_mat(ctx, k, q);

-        kq = ggml_soft_max_ext(ctx, kq, kq_mask, nullptr, kq_scale, 0.0f);
+        kq = ggml_soft_max_ext(ctx, kq, kq_mask, kq_scale, 0.0f);

         // split cached v into n_head heads
         struct ggml_tensor * v =
@@ -2128,6 +2126,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
 #endif
     for (bool mask : {false, true}) {
         for (float max_bias : {0.0f, 8.0f}) {
+            if (!mask && max_bias > 0.0f) continue;
             for (float scale : {1.0f, 0.1f}) {
                 for (int64_t ne0 : {16, 1024}) {
                     for (int64_t ne1 : {16, 1024}) {
@@ -2141,7 +2140,6 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op

     test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, 0.1f, 0.0f));
     test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 0.0f));
-    test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {16, 2, 32, 1}, false, 0.1f, 8.0f));
     test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 8.0f));

     for (ggml_type type : {GGML_TYPE_F32, GGML_TYPE_F16}) {
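The dropped case is exactly the mask = false, max_bias = 8.0f combination, matching the `if (!mask && max_bias > 0.0f) continue;` guard added in the previous hunk: an ALiBi bias without a mask is no longer enumerated by the sweep, so the explicit test case goes away with it.
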
@@ -2180,10 +2178,12 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
 #else
     for (int hs : { 64, 80, 128, 256, }) {
 #endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+        for (float max_bias : {0.0f, 8.0f}) {
         for (int nh : { 32, }) {
             for (int kv : { 512, 1024, }) {
                 for (int nb : { 1, 2, 4, 8, }) {
-                    test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb));
+                    test_cases.emplace_back(new test_flash_attn_ext(hs, nh, kv, nb, max_bias));
+                }
                 }
             }
         }
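On the non-HIP path shown here the flash-attention sweep now covers 4 head sizes × 2 max_bias values × 1 head count × 2 KV lengths × 4 batch sizes = 64 cases per backend, twice as many as before the max_bias loop was added.
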
@@ -13,15 +13,27 @@
 #include <vector>

 int main(int argc, char **argv) {
-    if (argc < 2) {
-        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
+    if (argc < 2 || argc > 3) {
+        fprintf(stderr, "Usage: %s <vocab-file> [--ignore-merges]\n", argv[0]);
         return 1;
     }

     const std::string fname = argv[1];
+    bool ignore_merges = false;
+    if (argc == 3) {
+        if (std::strcmp(argv[2], "--ignore-merges") != 0) {
+            fprintf(stderr, "Usage: %s <vocab-file> [--ignore-merges]\n", argv[0]);
+            return 1;
+        }
+        ignore_merges = true;
+    }

     fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());

+    if (ignore_merges) {
+        fprintf(stderr, "%s : ignoring merges for tokens inside vocab\n", __func__);
+    }
+
     llama_model * model;
     llama_context * ctx;
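The tool now accepts an optional second argument, e.g. `test-tokenizer-1-bpe <vocab-file> --ignore-merges` (the same form as the commented-out llama_test invocation in the CMake hunk above); any other second argument prints the usage string and exits with code 1.
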
@@ -65,7 +77,19 @@ int main(int argc, char **argv) {
         std::string str = llama_detokenize_bpe(ctx, std::vector<int>(1, i));
         try {
             auto cps = unicode_cpts_from_utf8(str);
-            std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+            std::vector<llama_token> tokens = llama_tokenize(ctx, str, false, true);
+            if (ignore_merges && tokens.size() > 1) {
+                fprintf(stderr,
+                        "%s : error: token %d detokenizes to '%s'(%zu) but "
+                        "tokenization of this to multiple tokens: [",
+                        __func__, i, str.c_str(), str.length());
+                fprintf(stderr, "%d", tokens[0]);
+                for (size_t i = 1; i < tokens.size(); i++) {
+                    fprintf(stderr, ", %d", tokens[i]);
+                }
+                fprintf(stderr, "]\n");
+                return 2;
+            }
             std::string check = llama_detokenize_bpe(ctx, tokens);
             if (check != str) {
                 fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
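With --ignore-merges set, a vocabulary entry whose detokenized text re-tokenizes into more than one token is printed together with the offending token ids and the program exits with code 2, distinct from the usage-error exit code 1 and from the detokenization round-trip check that follows.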