Compare commits

...
Sign in to create a new pull request.

37 commits

Author SHA1 Message Date
Georgi Gerganov
7923b70cb8
llama : add llm_build_inp_embd helper 2023-10-31 16:43:08 +02:00
Georgi Gerganov
2073347e3b
llama : remove extra ; + deduplicate gate_b logic 2023-10-31 16:28:09 +02:00
Georgi Gerganov
fc5a26aade
llama : enable warning about not offloaded tensors 2023-10-31 08:57:10 +02:00
Georgi Gerganov
0bfdcdd0f8
llama : normalize tensor names
ggml-ci
2023-10-31 08:48:37 +02:00
Georgi Gerganov
6669cd8329
llama : update offload functions for KQ tensors 2023-10-31 08:24:07 +02:00
Georgi Gerganov
2926ef63b1
llama : fix input allocation logic 2023-10-31 08:23:43 +02:00
Georgi Gerganov
a3f80013ad
llama : add LLAMA_OFFLOAD_DEBUG + fix starcoder offloading 2023-10-30 12:14:23 +02:00
Georgi Gerganov
792d1a1b16
llama : minor 2023-10-30 11:34:47 +02:00
Georgi Gerganov
f39e6075cf
llama : add llm_build_kqv helper
ggml-ci
2023-10-29 22:45:03 +02:00
Georgi Gerganov
c9121fdd0f
llama : remove obsolete comments in build graphs 2023-10-29 21:44:19 +02:00
Georgi Gerganov
a104abea48
llama : simplify falcon Q, K, V computation 2023-10-29 21:24:25 +02:00
Georgi Gerganov
31a12f3d03
llama : fix llm_build_k_shift to use n_head_kv instead of n_head 2023-10-29 21:17:46 +02:00
Georgi Gerganov
5990861938
llama : remove obsolete offload names 2023-10-29 21:11:20 +02:00
Georgi Gerganov
3e0462594b
llama : add llm_build_kv_store helper
ggml-ci
2023-10-29 21:09:34 +02:00
Georgi Gerganov
909d64471b
llama : fix offloading after recent changes 2023-10-29 20:38:49 +02:00
Georgi Gerganov
38728a0be0
llama : add llm_build_k_shift helper
ggml-ci
2023-10-29 19:23:07 +02:00
Georgi Gerganov
dbf836bb64
llama : add llm_build_ffn helper function (#3849)
ggml-ci
2023-10-29 18:47:46 +02:00
Georgi Gerganov
7db9c96d8a
llama : add llm_build_norm helper function
ggml-ci
2023-10-29 15:48:48 +02:00
Georgi Gerganov
210e6e5d02
llama : remove obsolete map for layer counting 2023-10-29 13:39:04 +02:00
Georgi Gerganov
79ad734417
llama : comment
ggml-ci
2023-10-29 13:27:53 +02:00
Georgi Gerganov
761087932b
llama : add functional header 2023-10-29 13:26:32 +02:00
Georgi Gerganov
8925cf9ef8
llama : add layer index to all tensor names 2023-10-29 13:22:15 +02:00
Georgi Gerganov
1e9c5443c2
llama : refactor tensor offloading as callback 2023-10-29 13:05:10 +02:00
Georgi Gerganov
da936188d8
llama : move refact in correct place + optimize graph input 2023-10-29 11:48:58 +02:00
Georgi Gerganov
739b85c985
llama : try to fix build 2023-10-29 11:25:32 +02:00
Georgi Gerganov
25cfbf6776
llama : fix non-CUDA build 2023-10-29 11:12:03 +02:00
Georgi Gerganov
b4ad03b3a7
llama : try to optimize offloading code 2023-10-29 10:33:11 +02:00
Georgi Gerganov
79617902ea
llama : fix res_norm offloading 2023-10-29 09:20:35 +02:00
Georgi Gerganov
e14aa46151
llama : do tensor offload only with CUDA 2023-10-29 08:03:46 +02:00
Georgi Gerganov
0dc05b8433
llama : factor graph input into a function 2023-10-29 07:52:43 +02:00
Georgi Gerganov
4e98897ede
llama : support offloading result_norm + comments 2023-10-29 07:36:07 +02:00
Georgi Gerganov
51c4f9ee9f
llama : comments 2023-10-28 22:50:08 +03:00
Georgi Gerganov
3af8771389
llama : update offload log messages to print node index 2023-10-28 22:36:44 +03:00
Georgi Gerganov
83d2c43791
llama : offload rest of the models
ggml-ci
2023-10-28 22:30:54 +03:00
Georgi Gerganov
38aca9e1ab
llama : factor out tensor offloading outside the build call (wip)
ggml-ci
2023-10-28 21:22:31 +03:00
Georgi Gerganov
5946d98fc8
metal : disable kernel load log 2023-10-28 21:22:01 +03:00
Georgi Gerganov
8b2420d249
llama : factor out ggml-alloc from graph build functions
ggml-ci
2023-10-28 19:54:28 +03:00
3 changed files with 1477 additions and 2191 deletions

View file

@ -238,12 +238,15 @@ struct ggml_metal_context * ggml_metal_init(int n_cb) {
// load kernels
{
NSError * error = nil;
#define GGML_METAL_ADD_KERNEL(name) \
ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \
ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \
/*
GGML_METAL_LOG_INFO("%s: loaded %-32s %16p | th_max = %4d | th_width = %4d\n", __func__, "kernel_"#name, (void *) ctx->pipeline_##name, \
(int) ctx->pipeline_##name.maxTotalThreadsPerThreadgroup, \
(int) ctx->pipeline_##name.threadExecutionWidth); \
*/
#define GGML_METAL_ADD_KERNEL(name) \
ctx->function_##name = [ctx->library newFunctionWithName:@"kernel_"#name]; \
ctx->pipeline_##name = [ctx->device newComputePipelineStateWithFunction:ctx->function_##name error:&error]; \
if (error) { \
GGML_METAL_LOG_ERROR("%s: error: load pipeline error: %s\n", __func__, [[error description] UTF8String]); \
return NULL; \

3595
llama.cpp

File diff suppressed because it is too large Load diff