Merge branch 'master' into compilade/refactor-kv-cache

commit 10c3c419e9

518 changed files with 78202 additions and 66427 deletions

@@ -12,43 +12,45 @@ include_directories(${CMAKE_CURRENT_SOURCE_DIR})

if (EMSCRIPTEN)
else()
add_subdirectory(cvector-generator)
add_subdirectory(baby-llama)
add_subdirectory(batched)
add_subdirectory(batched-bench)
add_subdirectory(batched)
add_subdirectory(benchmark)
add_subdirectory(convert-llama2c-to-ggml)
add_subdirectory(embedding)
add_subdirectory(eval-callback)
add_subdirectory(export-lora)
add_subdirectory(finetune)
add_subdirectory(gritlm)
add_subdirectory(gbnf-validator)
add_subdirectory(gguf-split)
add_subdirectory(gguf)
add_subdirectory(gritlm)
add_subdirectory(imatrix)
add_subdirectory(infill)
add_subdirectory(llama-bench)
add_subdirectory(llava)
if (LLAMA_SYCL)
add_subdirectory(sycl)
endif()
add_subdirectory(main)
add_subdirectory(tokenize)
add_subdirectory(parallel)
add_subdirectory(perplexity)
add_subdirectory(quantize)
add_subdirectory(quantize-stats)
add_subdirectory(retrieval)
add_subdirectory(save-load-state)
add_subdirectory(simple)
add_subdirectory(passkey)
add_subdirectory(speculative)
add_subdirectory(lookahead)
add_subdirectory(lookup)
add_subdirectory(gguf)
add_subdirectory(train-text-from-scratch)
add_subdirectory(imatrix)
if (LLAMA_BUILD_SERVER)
add_subdirectory(server)
endif()
add_subdirectory(export-lora)
if (LLAMA_RPC)
add_subdirectory(main)
add_subdirectory(parallel)
add_subdirectory(passkey)
add_subdirectory(perplexity)
add_subdirectory(quantize-stats)
add_subdirectory(quantize)
add_subdirectory(retrieval)
if (GGML_RPC)
add_subdirectory(rpc)
endif()
if (LLAMA_BUILD_SERVER)
add_subdirectory(server)
endif()
if (GGML_SYCL)
add_subdirectory(sycl)
endif()
add_subdirectory(save-load-state)
add_subdirectory(simple)
add_subdirectory(speculative)
add_subdirectory(tokenize)
add_subdirectory(train-text-from-scratch)
endif()

@@ -22,7 +22,7 @@ if [ -n "$N_THREAD" ]; then
GEN_OPTIONS+=(--threads "$N_THREAD")
fi

./main "${GEN_OPTIONS[@]}" \
./llama-cli "${GEN_OPTIONS[@]}" \
--model "$MODEL" \
--in-prefix " " \
--in-suffix "${AI_NAME}:" \

@@ -1,4 +1,4 @@
set(TARGET baby-llama)
set(TARGET llama-baby-llama)
add_executable(${TARGET} baby-llama.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})

@@ -58,4 +58,4 @@ echo "$2
model=$1

# generate the most likely continuation until the string "===" is found
./main -m $model -f $ftmp -n 64 --temp 0 --repeat-penalty 1.0 --no-penalize-nl -r "===" $eargs
./llama-cli -m $model -f $ftmp -n 64 --temp 0 --repeat-penalty 1.0 --no-penalize-nl -r "===" $eargs

@@ -1,4 +1,4 @@
set(TARGET batched-bench)
set(TARGET llama-batched-bench)
add_executable(${TARGET} batched-bench.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})

@@ -10,16 +10,16 @@ There are 2 modes of operation:
- `prompt is shared` - there is a common prompt of size `PP` used by all batches (i.e. `N_KV = PP + B*TG`)

```bash
./batched-bench -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps]
./llama-batched-bench -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps]

# LLaMA 7B, F16, N_KV_MAX = 16384 (8GB), prompt not shared
./batched-bench -m ./models/llama-7b/ggml-model-f16.gguf -c 16384 -b 2048 -ub 512 -ngl 99
./llama-batched-bench -m ./models/llama-7b/ggml-model-f16.gguf -c 16384 -b 2048 -ub 512 -ngl 99

# LLaMA 7B, Q8_0, N_KV_MAX = 16384 (8GB), prompt is shared
./batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 16384 -b 2048 -ub 512 -ngl 99 -pps
./llama-batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 16384 -b 2048 -ub 512 -ngl 99 -pps

# custom set of batches
./batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 2048 -b 512 -ub 512 -ngl 999 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32
./llama-batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 2048 -b 512 -ub 512 -ngl 999 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32
```

## Sample results

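For reference, the two benchmark modes quoted above differ in how much KV cache they need: with `-pps` the shared prompt is stored once (`N_KV = PP + B*TG`), while without it each of the `B` batches keeps its own copy of the prompt, which works out to `B*(PP + TG)`. The sketch below is illustrative only and not part of the diff; it simply evaluates these two formulas.

```cpp
// Sketch: KV-cache size needed by llama-batched-bench for one (PP, TG, B) combination.
// Formulas follow the README wording: with a shared prompt (-pps) the prompt is stored
// once, otherwise every batch keeps its own copy.
#include <cstdio>
#include <initializer_list>

static int n_kv_required(int pp, int tg, int n_batches, bool prompt_shared) {
    return prompt_shared ? pp + n_batches * tg      // -pps: N_KV = PP + B*TG
                         : n_batches * (pp + tg);   // default: N_KV = B*(PP + TG)
}

int main() {
    const int pp = 512, tg = 128;
    for (int b : {1, 8, 32}) {
        std::printf("B=%2d  shared=%6d  not-shared=%6d\n",
                    b, n_kv_required(pp, tg, b, true), n_kv_required(pp, tg, b, false));
    }
    return 0;
}
```

Either value has to fit inside the `-c` context size passed to the benchmark.
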
@@ -1,6 +1,6 @@
.PHONY: build

build:
xcodebuild -scheme batched_swift -destination "generic/platform=macOS" -derivedDataPath build
rm -f ./batched_swift
ln -s ./build/Build/Products/Debug/batched_swift ./batched_swift
xcodebuild -scheme llama-batched-swift -destination "generic/platform=macOS" -derivedDataPath build
rm -f ./llama-batched-swift
ln -s ./build/Build/Products/Debug/llama-batched-swift ./llama-batched-swift

@@ -4,7 +4,7 @@
import PackageDescription

let package = Package(
name: "batched_swift",
name: "llama-batched-swift",
platforms: [.macOS(.v12)],
dependencies: [
.package(name: "llama", path: "../../"),

@@ -13,7 +13,7 @@ let package = Package(
// Targets are the basic building blocks of a package, defining a module or a test suite.
// Targets can depend on other targets in this package and products from dependencies.
.executableTarget(
name: "batched_swift",
name: "llama-batched-swift",
dependencies: ["llama"],
path: "Sources",
linkerSettings: [.linkedFramework("Foundation"), .linkedFramework("AppKit")]

@@ -1,4 +1,4 @@
This is a swift clone of `examples/batched`.

$ `make`
$ `./batched_swift MODEL_PATH [PROMPT] [PARALLEL]`
$ `./llama-batched-swift MODEL_PATH [PROMPT] [PARALLEL]`

@@ -1,4 +1,4 @@
set(TARGET batched)
set(TARGET llama-batched)
add_executable(${TARGET} batched.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})

@@ -3,7 +3,7 @@
The example demonstrates batched generation from a given prompt

```bash
./batched -m ./models/llama-7b-v2/ggml-model-f16.gguf -p "Hello my name is" -np 4
./llama-batched -m ./models/llama-7b-v2/ggml-model-f16.gguf -p "Hello my name is" -np 4

...

@@ -1,4 +1,4 @@
set(TARGET benchmark)
set(TARGET llama-bench-matmult)
add_executable(${TARGET} benchmark-matmult.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT})

@@ -30,7 +30,7 @@ sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \
$PROMPT_TEMPLATE > $PROMPT_FILE

# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
./main $GEN_OPTIONS \
./llama-cli $GEN_OPTIONS \
--model "$MODEL" \
--threads "$N_THREAD" \
--n_predict "$N_PREDICTS" \

@@ -62,7 +62,7 @@ fi
if [[ ! -e "$PROMPT_CACHE_FILE" ]]; then
echo 'Prompt cache does not exist, building...'
# Default batch_size to 64 here for better user feedback during initial prompt processing
./main 2>>"$LOG" \
./llama-cli 2>>"$LOG" \
--batch_size 64 \
"${OPTS[@]}" \
--prompt-cache "$PROMPT_CACHE_FILE" \

@@ -109,13 +109,13 @@ while read -e line; do

printf '%s: ' "$AI_NAME" >>"$CUR_PROMPT_FILE"

./main 2>>"$LOG" "${OPTS[@]}" \
./llama-cli 2>>"$LOG" "${OPTS[@]}" \
--prompt-cache "$CUR_PROMPT_CACHE" \
--prompt-cache-all \
--file "$CUR_PROMPT_FILE" \
--reverse-prompt "${USER_NAME}:" \
--n_predict "$n_predict" |
skip_bytes 1 | # skip BOS token added by ./main
skip_bytes 1 | # skip BOS token added by ./llama-cli
tee "$CUR_PROMPT_FILE.tmp" | # save prompt + generation to tmp file
skip_bytes "$n_prompt_len_pre" # print generation

@@ -133,7 +133,7 @@ while read -e line; do
# TODO get both messages in one go
if ! session_size_msg="$(tail -n30 "$LOG" | grep -oE "$SESSION_SIZE_MSG_PATTERN")" ||
! sample_time_msg="$(tail -n10 "$LOG" | grep -oE "$SAMPLE_TIME_MSG_PATTERN")"; then
echo >&2 "Couldn't get number of tokens from ./main output!"
echo >&2 "Couldn't get number of tokens from ./llama-cli output!"
exit 1
fi

@@ -144,7 +144,7 @@ while read -e line; do
fi

# Update cache for next prompt in background, ideally during user input
./main >>"$LOG_BG" 2>&1 "${OPTS[@]}" \
./llama-cli >>"$LOG_BG" 2>&1 "${OPTS[@]}" \
--prompt-cache "$NEXT_PROMPT_CACHE" \
--file "$NEXT_PROMPT_FILE" \
--n_predict 1 &

@@ -30,7 +30,7 @@ sed -e "s/\[\[USER_NAME\]\]/$USER_NAME/g" \
$PROMPT_TEMPLATE > $PROMPT_FILE

# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
./bin/main $GEN_OPTIONS \
./bin/llama-cli $GEN_OPTIONS \
--model "$MODEL" \
--threads "$N_THREAD" \
--n_predict "$N_PREDICTS" \

@@ -11,6 +11,6 @@ cd ..
#
# "--keep 48" is based on the contents of prompts/chat-with-bob.txt
#
./main -m ./models/llama-7b/ggml-model-q4_0.gguf -c 512 -b 1024 -n 256 --keep 48 \
./llama-cli -m ./models/llama-7b/ggml-model-q4_0.gguf -c 512 -b 1024 -n 256 --keep 48 \
--repeat_penalty 1.0 --color -i \
-r "User:" -f prompts/chat-with-bob.txt

@@ -1,4 +1,4 @@
set(TARGET convert-llama2c-to-ggml)
set(TARGET llama-convert-llama2c-to-ggml)
add_executable(${TARGET} convert-llama2c-to-ggml.cpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})

@@ -8,7 +8,7 @@ To convert the model first download the models from the [llama2.c](https://githu

After successful compilation, following usage options are available:
```
usage: ./convert-llama2c-to-ggml [options]
usage: ./llama-convert-llama2c-to-ggml [options]

options:
-h, --help show this help message and exit

@@ -19,10 +19,10 @@ options:

An example command using a model from [karpathy/tinyllamas](https://huggingface.co/karpathy/tinyllamas) is as follows:

`$ ./convert-llama2c-to-ggml --copy-vocab-from-model llama-2-7b-chat.gguf.q2_K.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.gguf.bin`
`$ ./llama-convert-llama2c-to-ggml --copy-vocab-from-model llama-2-7b-chat.gguf.q2_K.bin --llama2c-model stories42M.bin --llama2c-output-model stories42M.gguf.bin`

Note: The vocabulary for `stories260K.bin` should be its own tokenizer `tok512.bin` found in [karpathy/tinyllamas/stories260K](https://huggingface.co/karpathy/tinyllamas/tree/main/stories260K).

Now you can use the model with a command like:

`$ ./main -m stories42M.gguf.bin -p "One day, Lily met a Shoggoth" -n 500 -c 256`
`$ ./llama-cli -m stories42M.gguf.bin -p "One day, Lily met a Shoggoth" -n 500 -c 256`

examples/cvector-generator/CMakeLists.txt (new file, 5 lines)

@@ -0,0 +1,5 @@
set(TARGET llama-cvector-generator)
add_executable(${TARGET} cvector-generator.cpp pca.hpp)
install(TARGETS ${TARGET} RUNTIME)
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
target_compile_features(${TARGET} PRIVATE cxx_std_11)

examples/cvector-generator/README.md (new file, 45 lines)

@@ -0,0 +1,45 @@
# cvector-generator

This example demonstrates how to generate a control vector using gguf models.

Related PRs:
- [Add support for control vectors](https://github.com/ggerganov/llama.cpp/pull/5970)
- (Issue) [Generate control vector using llama.cpp](https://github.com/ggerganov/llama.cpp/issues/6880)
- [Add cvector-generator example](https://github.com/ggerganov/llama.cpp/pull/7514)

## Examples

```sh
# CPU only
./cvector-generator -m ./llama-3.Q4_K_M.gguf

# With GPU
./cvector-generator -m ./llama-3.Q4_K_M.gguf -ngl 99

# With advanced options
./cvector-generator -m ./llama-3.Q4_K_M.gguf -ngl 99 --pca-iter 2000 --pca-batch 100

# Using mean value instead of PCA
./cvector-generator -m ./llama-3.Q4_K_M.gguf --method mean

# To see help message
./cvector-generator -h
# Then, have a look at "cvector" section
```

## Tips and tricks

If you have multiple lines per prompt, you can escape the newline character (change it to `\n`). For example:

```
<|im_start|>system\nAct like a person who is extremely happy.<|im_end|>
<|im_start|>system\nYou are in a very good mood today<|im_end|>
```

Example to use output file with `llama-cli`:

(Tips: The control vector works better when apply to layers higher than 10)

```sh
./llama-cli -m ./llama-3.Q4_K_M.gguf -p "<|start_header_id|>system<|end_header_id|>\n\nYou are a helpful assistant<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nSing a song<|im_end|><|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n" --special --control-vector-scaled ./control_vector.gguf 0.8 --control-vector-layer-range 10 31
```

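As background for the `--control-vector-scaled` example in the README above: applying a control vector amounts to adding a scaled per-layer direction to the model's hidden states inside the chosen layer range. The sketch below is conceptual only, it is not llama.cpp's internal implementation, and the container types and function name are illustrative.

```cpp
// Conceptual sketch: applying a control vector to per-layer hidden states.
// direction[il] is the unit vector produced by cvector-generator for layer il,
// scale corresponds to the 0.8 in --control-vector-scaled, and the layer bounds
// correspond to --control-vector-layer-range 10 31.
#include <vector>

using Vec = std::vector<float>;

static void apply_control_vector(std::vector<Vec> & hidden,          // hidden[il][i], one vector per layer
                                 const std::vector<Vec> & direction, // one direction per layer
                                 float scale, int layer_lo, int layer_hi) {
    for (int il = layer_lo; il <= layer_hi && il < (int) hidden.size(); ++il) {
        for (size_t i = 0; i < hidden[il].size(); ++i) {
            hidden[il][i] += scale * direction[il][i];
        }
    }
}
```
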
examples/cvector-generator/completions.txt (new file, 582 lines)

@@ -0,0 +1,582 @@
|
|||
|
||||
That game
|
||||
I can see
|
||||
Hmm, this
|
||||
I can relate to
|
||||
Who is
|
||||
I understand the
|
||||
Ugh,
|
||||
What the hell was
|
||||
Hey, did anyone
|
||||
Although
|
||||
Thank you for choosing
|
||||
What are you
|
||||
Oh w
|
||||
How dare you open
|
||||
It was my pleasure
|
||||
I'm hon
|
||||
I appreciate that you
|
||||
Are you k
|
||||
Whoever left this
|
||||
It's always
|
||||
Ew,
|
||||
Hey, I l
|
||||
Hello? Is someone
|
||||
I understand that
|
||||
That poem
|
||||
Aww, poor
|
||||
Hey, it
|
||||
Alright, who
|
||||
I didn't
|
||||
Well, life
|
||||
The document
|
||||
Oh no, this
|
||||
I'm concerned
|
||||
Hello, this is
|
||||
This art
|
||||
Hmm, this drink
|
||||
Hi there!
|
||||
It seems
|
||||
Is
|
||||
Good
|
||||
I can't
|
||||
Ex
|
||||
Who are
|
||||
I can see that
|
||||
Wow,
|
||||
Today is a
|
||||
Hey friend
|
||||
Sometimes friends
|
||||
Oh, this old
|
||||
The weather outside
|
||||
This place is sur
|
||||
I appreciate your input
|
||||
Thank you for the
|
||||
Look at
|
||||
I'm disappoint
|
||||
To my
|
||||
How dare you
|
||||
That's an
|
||||
This piece of art
|
||||
Eww
|
||||
This park is
|
||||
This is incredible
|
||||
Oh no, someone
|
||||
Exc
|
||||
Well, it'
|
||||
I warned
|
||||
Hey, I understand
|
||||
Hey, I saw
|
||||
How dare you go
|
||||
What the he
|
||||
Hey
|
||||
It's
|
||||
Hello? Hello?
|
||||
It
|
||||
Oh no!
|
||||
This is the perfect
|
||||
Good morning,
|
||||
Oh no, there
|
||||
It's so
|
||||
Yeah
|
||||
Uh,
|
||||
Hello everyone
|
||||
Who turned off
|
||||
The weather
|
||||
Who'
|
||||
Hey, this
|
||||
Wait,
|
||||
Eww, gross
|
||||
Excuse
|
||||
It seems like you
|
||||
Thank you so
|
||||
What happened?
|
||||
Oh my g
|
||||
I am deeply sad
|
||||
I war
|
||||
Okay, let'
|
||||
Hey, that
|
||||
That was a beautiful
|
||||
Oh no! That
|
||||
What happened
|
||||
Hey there
|
||||
The artist'
|
||||
What?!
|
||||
Hey, it'
|
||||
I am disappoint
|
||||
It seems like
|
||||
Oh no! The
|
||||
This park is a
|
||||
If you
|
||||
Yes! I did
|
||||
It sounds
|
||||
What
|
||||
Who is it
|
||||
Hmm, that
|
||||
That's strange
|
||||
Yeah, that was
|
||||
That's interesting
|
||||
This park
|
||||
What the hell
|
||||
Who is that
|
||||
I feel like my
|
||||
Oh well
|
||||
What the hell is
|
||||
Hello? Hello
|
||||
To my dearest
|
||||
Bless you!\"
|
||||
Thank you for
|
||||
Oh, looks like
|
||||
Can you please
|
||||
This place is
|
||||
Eww, what
|
||||
Bless you
|
||||
Is everything
|
||||
Hey, I just
|
||||
Whoever left these
|
||||
Well, that'
|
||||
I feel
|
||||
Hey, do you
|
||||
It's sad
|
||||
Oh no, it
|
||||
Hey, that'
|
||||
Oh my god,
|
||||
Thank you,
|
||||
Hello little one,
|
||||
I apolog
|
||||
Hey team, I
|
||||
How dare you read
|
||||
Who is this and
|
||||
Whoever left
|
||||
Hi there! W
|
||||
A
|
||||
If you have
|
||||
I was
|
||||
U
|
||||
Bless
|
||||
Well, this
|
||||
Oh, I'
|
||||
It's a
|
||||
Eww,
|
||||
Is everything okay?
|
||||
Oh, I
|
||||
Hello, can you
|
||||
Al
|
||||
That was a great
|
||||
What are
|
||||
I understand that not
|
||||
Oh no, not
|
||||
Who is it?\"
|
||||
Hey, can we
|
||||
Whoever is taking
|
||||
I would love to
|
||||
Hey, I noticed
|
||||
Hey, could
|
||||
I understand that there
|
||||
Hello?
|
||||
D
|
||||
Oh man, I
|
||||
Thank you so much
|
||||
Oh no, my
|
||||
Dear [Name
|
||||
Uh
|
||||
I remember
|
||||
Hey, who
|
||||
Well, it
|
||||
Are you
|
||||
I understand that it
|
||||
Hey, is
|
||||
I would
|
||||
Who is this
|
||||
Excuse me
|
||||
Alright
|
||||
I am thrilled
|
||||
Sometimes friends have
|
||||
Who the
|
||||
It's interesting
|
||||
I would love
|
||||
E
|
||||
Hello? Is anyone
|
||||
Well, this is
|
||||
This place
|
||||
Well,
|
||||
I warned you
|
||||
Hey, watch where
|
||||
Oh my
|
||||
That'
|
||||
Sometimes friends have different
|
||||
I understand that everyone
|
||||
What?
|
||||
What do these notes
|
||||
I can relate
|
||||
I'm not
|
||||
I understand
|
||||
To my dear
|
||||
Guys
|
||||
Well
|
||||
Hey, I appreciate
|
||||
Wow, what
|
||||
Dear
|
||||
That melody
|
||||
Who the hell
|
||||
Today is
|
||||
Hello little
|
||||
Wow, look
|
||||
That's great
|
||||
Love is never wrong
|
||||
I'm having
|
||||
Whoa, did
|
||||
Ugh
|
||||
Can you please provide
|
||||
I miss you,
|
||||
I feel uncom
|
||||
I know
|
||||
Ugh, this
|
||||
Hey, watch
|
||||
Oh great, a
|
||||
I didn
|
||||
Okay
|
||||
That game of char
|
||||
Oh
|
||||
I appreciate
|
||||
Who's there
|
||||
I am so
|
||||
Oh great, someone
|
||||
Hey, could you
|
||||
I remember wondering
|
||||
Wait, what?
|
||||
What do
|
||||
Hello? Can
|
||||
Hey there,
|
||||
That game of
|
||||
This is incred
|
||||
Oh my gosh
|
||||
Oh great, f
|
||||
I appreciate your
|
||||
It sounds like
|
||||
What the heck
|
||||
Okay, I understand
|
||||
Ew
|
||||
I understand that this
|
||||
Uh, hi
|
||||
Hi everyone!
|
||||
What the hell?
|
||||
Thank you for your
|
||||
Oh no, the
|
||||
Wow, I
|
||||
Who turned
|
||||
Dear [
|
||||
Whoever
|
||||
This is a
|
||||
Whoa, he
|
||||
What in the world
|
||||
Although the physical
|
||||
Hello, who is
|
||||
That's amaz
|
||||
Hey, I know
|
||||
Okay, that
|
||||
Hi everyone
|
||||
Hey, is everything
|
||||
I understand your fr
|
||||
Oh no, poor
|
||||
Oh, look
|
||||
Good morning
|
||||
Ew, gross
|
||||
Oh no, did
|
||||
Look at the family
|
||||
Hey team
|
||||
Yes!
|
||||
Hey, can I
|
||||
Okay, that'
|
||||
It's great
|
||||
Love is
|
||||
Hey, what
|
||||
Good morning, world
|
||||
Who is it?
|
||||
That poem really reson
|
||||
I
|
||||
That's
|
||||
I understand the task
|
||||
Gu
|
||||
Hello? Who'
|
||||
This postcard is
|
||||
Whoa,
|
||||
Oh, that
|
||||
I understand that I
|
||||
Whoever is
|
||||
Hello? Who is
|
||||
I'm really
|
||||
Wow, this
|
||||
Can
|
||||
This artwork really
|
||||
This is a shame
|
||||
I miss you too
|
||||
Who are you?
|
||||
Today is a difficult
|
||||
Hey, just
|
||||
Are you okay
|
||||
I am
|
||||
Hi,
|
||||
Wow, that
|
||||
Hey there! Can
|
||||
Okay, stay
|
||||
Oh great, just
|
||||
Yeah,
|
||||
Hello? Can you
|
||||
Oh, looks
|
||||
Thank you for sharing
|
||||
I'm glad
|
||||
Hey, is that
|
||||
Hmm
|
||||
It was my
|
||||
It sounds like you
|
||||
Wow, your
|
||||
I was promised certain
|
||||
That was such a
|
||||
Thank
|
||||
Excuse you
|
||||
That was
|
||||
Hey team,
|
||||
I feel un
|
||||
It was
|
||||
What'
|
||||
Hey friend, I
|
||||
How
|
||||
Saying goodbye
|
||||
That
|
||||
It's heart
|
||||
How dare
|
||||
Oh,
|
||||
Hello, may
|
||||
What's this
|
||||
Thank you for recogn
|
||||
Aww, that
|
||||
Oh, I remember
|
||||
Hmm, that'
|
||||
I miss
|
||||
I know this
|
||||
Wait
|
||||
Is everything okay
|
||||
Who is that person
|
||||
Wow, you
|
||||
Oh great
|
||||
I'm sad
|
||||
Wow, the
|
||||
I am very disappoint
|
||||
Who turned off the
|
||||
I understand that things
|
||||
I'm very
|
||||
Hi
|
||||
That's very
|
||||
Okay, I
|
||||
Oh no,
|
||||
Wow, there
|
||||
What's wrong
|
||||
I apologize for
|
||||
Hey, I
|
||||
Can I help you
|
||||
Oh, I didn
|
||||
Alright,
|
||||
Oh wow,
|
||||
Oh my goodness
|
||||
I know this event
|
||||
What in the
|
||||
Saying
|
||||
Yeah, that
|
||||
Guys, I
|
||||
Hey, this v
|
||||
This post
|
||||
Are
|
||||
Hey, can
|
||||
Hello? Is
|
||||
I can only imagine
|
||||
Oh, that sounds
|
||||
Hey, is anyone
|
||||
I am disappointed
|
||||
Hello,
|
||||
Hey everyone, I
|
||||
That was such
|
||||
It's okay
|
||||
The artist
|
||||
Whoa
|
||||
I understand that mistakes
|
||||
Can I help
|
||||
Who
|
||||
Hi everyone! I
|
||||
Hey, can you
|
||||
Wow, how
|
||||
Today
|
||||
Oh no, I
|
||||
Oh well, I
|
||||
Well, that
|
||||
This is the
|
||||
Yes! I finally
|
||||
Hey there little
|
||||
Hello everyone!
|
||||
Love is never
|
||||
Look at the
|
||||
This postcard
|
||||
Oh great,
|
||||
Can I
|
||||
Hmm, this is
|
||||
I understand your
|
||||
Oh, look at
|
||||
B
|
||||
I'm so
|
||||
Whoa, this
|
||||
W
|
||||
Oh, this
|
||||
Sometimes
|
||||
This piece of
|
||||
What the
|
||||
That was a
|
||||
Hey, do
|
||||
Oh no
|
||||
Whoa, what
|
||||
I feel like I
|
||||
The documentary
|
||||
Hello
|
||||
Hello little one
|
||||
I understand that my
|
||||
Eww, that
|
||||
Wow, an
|
||||
Yes! Finally,
|
||||
Although the physical location
|
||||
Whoever is watching
|
||||
That movie
|
||||
I remember wondering about
|
||||
Hey there, little
|
||||
Who's
|
||||
Hello, who
|
||||
Hello everyone! Thank
|
||||
Hello, can
|
||||
That's too
|
||||
Hey, just wanted
|
||||
Hey there, I
|
||||
Saying good
|
||||
Hey there!
|
||||
Who is there?
|
||||
Oh my good
|
||||
I am very
|
||||
Oh no, what
|
||||
Wow, thank
|
||||
I was promised
|
||||
Hi, is
|
||||
Hey, I'
|
||||
Guys, the
|
||||
Oh no, that
|
||||
Who is there
|
||||
Hello, this
|
||||
That movie really touched
|
||||
If you have something
|
||||
The documentary was
|
||||
I'm starting
|
||||
Are you kidd
|
||||
That movie really
|
||||
Hey everyone,
|
||||
Thank you for considering
|
||||
I didn'
|
||||
Yes! I
|
||||
Can you
|
||||
Oh my god
|
||||
Hey, whoever
|
||||
That melody really
|
||||
Thank you, little
|
||||
Hello, may I
|
||||
Look
|
||||
Wow, we
|
||||
It looks
|
||||
What do these
|
||||
Oh wow
|
||||
I apologize
|
||||
What are you all
|
||||
It's such
|
||||
It's clear
|
||||
Hey, I was
|
||||
Hey friend,
|
||||
I can only
|
||||
The weather outside is
|
||||
Eww, this
|
||||
I miss you
|
||||
Wow
|
||||
Aww,
|
||||
Hi, is there
|
||||
This artwork
|
||||
Okay,
|
||||
Oh well,
|
||||
This
|
||||
I'
|
||||
Say
|
||||
Hey there little gu
|
||||
Hmm,
|
||||
Whoa, who
|
||||
I am thr
|
||||
Oh man
|
||||
Okay, stay calm
|
||||
I'm happy
|
||||
Oh, this cur
|
||||
Oh man,
|
||||
I'm sorry
|
||||
Hello? Who
|
||||
What?! That
|
||||
This piece
|
||||
Hey everyone
|
||||
That's so
|
||||
Are you okay?
|
||||
What happened? Where
|
||||
Hi there
|
||||
The
|
||||
Who the hell entered
|
||||
I can
|
||||
Guys,
|
||||
What's
|
||||
What in
|
||||
It's important
|
||||
I'm
|
||||
I'm coming
|
||||
It'
|
||||
Yes! Finally
|
||||
Wait, what
|
||||
Wow, reading
|
||||
I'm surprised
|
||||
Hey, did
|
||||
Hey,
|
||||
Okay, let
|
||||
I understand that you
|
||||
Who the hell threw
|
||||
Eww, who
|
||||
Thank you for thinking
|
||||
Who is this?\"
|
||||
I am deeply
|
||||
Thank you for including
|
||||
Oh no, an
|
||||
It looks like you
|
||||
Aww
|
||||
I'm confused
|
||||
Wow, it
|
||||
That poem really
|
||||
Yes
|
||||
Hey there, is
|
||||
Hey, what'
|
||||
Thank you for remember
|
||||
To
|
||||
This is
|
||||
Thank you for making
|
||||
I can'
|
||||
That mel
|
||||
Wow, they
|
||||
I feel like
|
||||
Although the
|
||||
Who are you
|
||||
Love
|
||||
If
|
||||
What the hell are
|
||||
I am so sad
|
||||
Oh, I found
|
||||
Thank you
|
||||
It looks like
|
||||
Well, life is
|
||||
I appreciate that
|
||||
The artist's
|
||||
Whoa, that
|
||||
It's never
|
examples/cvector-generator/cvector-generator.cpp (new file, 503 lines)

@@ -0,0 +1,503 @@
|
|||
#include "common.h"
|
||||
#include "llama.h"
|
||||
#include "ggml.h"
|
||||
#include "pca.hpp"
|
||||
#include "mean.hpp"
|
||||
|
||||
#ifdef GGML_USE_CUDA
|
||||
#include "ggml-cuda.h"
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
#include "ggml-metal.h"
|
||||
#endif
|
||||
|
||||
#include <cstdio>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
#include <climits>
|
||||
|
||||
|
||||
//////////////////////////////////////////////////
|
||||
// utils
|
||||
|
||||
template <class Iter>
|
||||
static std::string tokens_to_str(llama_context * ctx, Iter begin, Iter end) {
|
||||
std::string ret;
|
||||
for (; begin != end; ++begin) {
|
||||
ret += llama_token_to_piece(ctx, *begin);
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void print_usage(int argc, char ** argv, const gpt_params & params) {
|
||||
gpt_params_print_usage(argc, argv, params);
|
||||
|
||||
printf("\nexample usage:\n");
|
||||
printf("\n CPU only: %s -m ./llama-3.Q4_K_M.gguf\n", argv[0]);
|
||||
printf("\n with GPU: %s -m ./llama-3.Q4_K_M.gguf -ngl 99\n", argv[0]);
|
||||
printf("\n advanced: %s -m ./llama-3.Q4_K_M.gguf -ngl 99 --pca-iter 2000 --pca-batch 100\n", argv[0]);
|
||||
printf("\n using mean: %s -m ./llama-3.Q4_K_M.gguf --method mean\n", argv[0]);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
|
||||
// cb_eval is reused for each pair of positive - negative prompt
|
||||
struct callback_data {
|
||||
ggml_context * ctx_ggml = nullptr; // holds v_pos, v_neg, v_diff_filtered
|
||||
|
||||
int n_layers = 0;
|
||||
int n_tokens = 0;
|
||||
bool is_eval_pos = true;
|
||||
|
||||
// each element of the vector correspond to one layer
|
||||
std::vector<struct ggml_tensor *> v_pos; // vector of matrices of size [n_embd, n_tokens]
|
||||
std::vector<struct ggml_tensor *> v_neg; // vector of matrices of size [n_embd, n_tokens]
|
||||
std::vector<struct ggml_tensor *> v_diff_filtered; // vector of matrices of size [n_embd, n_nonzero_rows]. NOTE: n_nonzero_rows maybe different for each layer
|
||||
|
||||
// save a tensor into either v_pos or v_neg (decided by is_eval_pos)
|
||||
void save_tensor_for_layer(struct ggml_tensor * t) {
|
||||
GGML_ASSERT(t->type == GGML_TYPE_F32);
|
||||
|
||||
if (ctx_ggml == nullptr) {
|
||||
// alloc a new ctx_ggml if needed
|
||||
struct ggml_init_params params_ggml = {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * n_layers * 3u,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
ctx_ggml = ggml_init(params_ggml);
|
||||
}
|
||||
|
||||
// copy tensor data
|
||||
auto n_bytes = ggml_nbytes(t);
|
||||
struct ggml_tensor * t_layer = ggml_new_tensor_2d(ctx_ggml, t->type, t->ne[0], t->ne[1]);
|
||||
t_layer->data = malloc(n_bytes); // TODO @ngxson : get rid of this malloc somehow
|
||||
ggml_backend_tensor_get(t, t_layer->data, 0, n_bytes);
|
||||
ggml_set_name(t_layer, ggml_get_name(t));
|
||||
//print_debug_tensor(t_layer);
|
||||
|
||||
if (is_eval_pos) {
|
||||
v_pos.push_back(t_layer);
|
||||
} else {
|
||||
v_neg.push_back(t_layer);
|
||||
}
|
||||
}
|
||||
|
||||
// calculate diff (v_pos - v_neg) and place the result back to v_pos
|
||||
// all zero rows in the diff tensor will also be removed
|
||||
// NOTE: final layer is ignored. we only have (n_layers - 1) to process
|
||||
std::vector<struct ggml_tensor *> calc_diff() {
|
||||
for (float il = 0; il < v_pos.size(); il++) {
|
||||
float * a = (float *) v_pos[il]->data;
|
||||
float * b = (float *) v_neg[il]->data;
|
||||
size_t n_elem = ggml_nelements(v_pos[il]);
|
||||
for (size_t j = 0; j < n_elem; j++) {
|
||||
a[j] -= b[j];
|
||||
}
|
||||
//print_debug_tensor(v_pos[i]);
|
||||
auto diff_filtered = filter_nonzero_rows(v_pos[il]);
|
||||
v_diff_filtered.push_back(diff_filtered);
|
||||
}
|
||||
return v_diff_filtered; // for convinient, we return the result std::vector
|
||||
}
|
||||
|
||||
// delete zero rows from a given 2D tensor
|
||||
struct ggml_tensor * filter_nonzero_rows(struct ggml_tensor * a) {
|
||||
//printf("filter_nonzero_rows\n");
|
||||
auto is_row_all_zeros = [](struct ggml_tensor * t, int row, float eps) -> bool {
|
||||
// check if given row containing all zero elements
|
||||
int n_cols = t->ne[0]; // hint: should be equal to n_embd
|
||||
for (int col = 0; col < n_cols; ++col) {
|
||||
if (ggml_get_f32_nd(t, col, row, 0, 0) > eps) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
};
|
||||
std::vector<int> rows_to_copy; // the idx of non-zero cols (to be copied to row of diff_filtered)
|
||||
for (int i_row = 0; i_row < a->ne[1]; i_row++) {
|
||||
if (!is_row_all_zeros(a, i_row, 1e-6)) {
|
||||
rows_to_copy.push_back(i_row);
|
||||
}
|
||||
}
|
||||
|
||||
// get "n_nonzero_rows" for the output "diff_filtered"
|
||||
int n_nonzero_rows = rows_to_copy.size();
|
||||
//printf("n_nonzero_rows: %d\n", n_nonzero_rows);
|
||||
int n_embd = a->ne[0];
|
||||
GGML_ASSERT(n_nonzero_rows > 0);
|
||||
|
||||
// diff_filtered: [n_embd, n_nonzero_rows]
|
||||
struct ggml_tensor * diff_filtered = ggml_new_tensor_2d(
|
||||
ctx_ggml, GGML_TYPE_F32, n_embd, n_nonzero_rows);
|
||||
ggml_format_name(diff_filtered, "diff_filtered_%s", a->name);
|
||||
diff_filtered->data = malloc(ggml_nbytes(diff_filtered));
|
||||
|
||||
// copy non-zero rows
|
||||
for (int dest_row = 0; dest_row < n_nonzero_rows; dest_row++) {
|
||||
int src_row = rows_to_copy[dest_row];
|
||||
for (int i = 0; i < n_embd; i++) {
|
||||
float src_elem = ggml_get_f32_nd(a, i, src_row, 0, 0);
|
||||
ggml_set_f32_nd(diff_filtered, i, dest_row, 0, 0, src_elem);
|
||||
}
|
||||
}
|
||||
|
||||
//print_debug_tensor(diff_filtered);
|
||||
|
||||
return diff_filtered;
|
||||
}
|
||||
|
||||
// we don't implement destructor, because we want to reuse callback_data. we just want to free the tensors
|
||||
void reset() {
|
||||
for (auto ptr : v_pos) free(ptr->data);
|
||||
for (auto ptr : v_neg) free(ptr->data);
|
||||
for (auto ptr : v_diff_filtered) free(ptr->data);
|
||||
v_pos.clear();
|
||||
v_neg.clear();
|
||||
v_diff_filtered.clear();
|
||||
if (ctx_ggml) {
|
||||
ggml_free(ctx_ggml);
|
||||
}
|
||||
ctx_ggml = nullptr;
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* process_ctx is used to store the ggml context for pre-post processing the diff vectors
|
||||
* in short, input => v_diff and output => v_final
|
||||
*/
|
||||
struct train_context {
|
||||
ggml_context * ctx_ggml;
|
||||
int n_embd;
|
||||
int n_layers;
|
||||
|
||||
/* pair of prompts to be used for generating final vector */
|
||||
std::vector<std::string> positive_entries;
|
||||
std::vector<std::string> negative_entries;
|
||||
|
||||
// each element of the vector correspond to one layer
|
||||
// NOTE: the last layer is discard. therefore, we will have (n_layers - 1) elements here
|
||||
// NOTE (2): v_diff is transposed from v_diff_tmp
|
||||
std::vector<struct ggml_tensor *> v_diff; // vector of matrices of size [m, n_embd] where m ~ n_tokens * n_completions (v_diff contains no zero-rows)
|
||||
std::vector<struct ggml_tensor *> v_final; // vector of vectors of size [n_embd] to be written to file
|
||||
|
||||
// to easily re-alloc when concat v_diff, we temporary store v_diff in a vector instead of a tensor
|
||||
// v_diff_tmp will get converted unto v_diff later on
|
||||
std::vector<std::vector<uint8_t>> v_diff_tmp;
|
||||
|
||||
train_context(int n_embd_, int n_layers_) {
|
||||
n_embd = n_embd_;
|
||||
n_layers = n_layers_;
|
||||
struct ggml_init_params params_ggml = {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * (n_layers - 1) * 2u,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
ctx_ggml = ggml_init(params_ggml);
|
||||
for (int il = 0; il < n_layers - 1; il++) {
|
||||
std::vector<uint8_t> empty;
|
||||
v_diff_tmp.push_back(empty);
|
||||
auto t = ggml_new_tensor_1d(ctx_ggml, GGML_TYPE_F32, n_embd);
|
||||
t->data = malloc(ggml_nbytes(t)); // TODO: get rid of malloc if possible
|
||||
v_final.push_back(t);
|
||||
}
|
||||
}
|
||||
|
||||
// add new rows into existing tensor in v_diff_tmp
|
||||
void concat_diff_tmp(const std::vector<struct ggml_tensor *> & diff_filtered) {
|
||||
GGML_ASSERT((int) diff_filtered.size() == n_layers - 1);
|
||||
for (int il = 0; il < n_layers - 1; il++) {
|
||||
auto t = diff_filtered[il];
|
||||
auto & diff_tmp = v_diff_tmp[il];
|
||||
size_t curr_size = diff_tmp.size();
|
||||
diff_tmp.resize(curr_size + ggml_nbytes(t));
|
||||
memcpy(diff_tmp.data() + curr_size, t->data, ggml_nbytes(t));
|
||||
}
|
||||
}
|
||||
|
||||
// build the v_diff tensors from v_diff_tmp (v_diff need to be transposed)
|
||||
// TODO @ngxson : maybe add option NOT to transpose v_diff; will be useful for "mean" method
|
||||
void build_v_diff(bool transpose) {
|
||||
printf("build_v_diff\n");
|
||||
for (int il = 0; il < n_layers - 1; il++) {
|
||||
auto & diff_tmp = v_diff_tmp[il];
|
||||
int n_elem = diff_tmp.size() / sizeof(float);
|
||||
GGML_ASSERT(n_elem % n_embd == 0);
|
||||
int n_rows = n_elem / n_embd;
|
||||
struct ggml_tensor * diff = transpose
|
||||
? ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_rows, n_embd)
|
||||
: ggml_new_tensor_2d(ctx_ggml, GGML_TYPE_F32, n_embd, n_rows);
|
||||
ggml_set_name(diff, (std::string("diff_") + std::to_string(il)).c_str());
|
||||
diff->data = malloc(ggml_nbytes(diff)); // TODO: get rid of this malloc if possible
|
||||
if (transpose) {
|
||||
// copy data & transpose
|
||||
float * arr = (float *) diff_tmp.data();
|
||||
for (int ir = 0; ir < n_rows; ++ir) {
|
||||
for (int ic = 0; ic < n_embd; ++ic) {
|
||||
float f = arr[ir*n_embd + ic];
|
||||
ggml_set_f32_nd(diff, ir, ic, 0, 0, f);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// only copy
|
||||
memcpy(diff->data, diff_tmp.data(), ggml_nbytes(diff));
|
||||
}
|
||||
v_diff.push_back(diff);
|
||||
print_debug_tensor(diff);
|
||||
// free memory of diff_tmp
|
||||
diff_tmp.resize(0);
|
||||
}
|
||||
}
|
||||
|
||||
~train_context() {
|
||||
for (auto ptr : v_final) free(ptr->data);
|
||||
for (auto ptr : v_diff) free(ptr->data);
|
||||
// no need to free v_diff_tmp, since we didn't use malloc
|
||||
ggml_free(ctx_ggml);
|
||||
}
|
||||
};
|
||||
|
||||
struct tokenized_prompt {
|
||||
std::vector<llama_token> tokens_pos;
|
||||
std::vector<llama_token> tokens_neg;
|
||||
size_t max_seq_len;
|
||||
|
||||
tokenized_prompt(llama_context * ctx, std::string pos, std::string neg) {
|
||||
const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
|
||||
tokens_pos = ::llama_tokenize(ctx, pos, add_bos, true);
|
||||
tokens_neg = ::llama_tokenize(ctx, neg, add_bos, true);
|
||||
max_seq_len = std::max(tokens_pos.size(), tokens_neg.size());
|
||||
padding_seq(ctx, tokens_pos, max_seq_len);
|
||||
padding_seq(ctx, tokens_neg, max_seq_len);
|
||||
}
|
||||
|
||||
void padding_seq(llama_context * ctx, std::vector<llama_token> & tokens, size_t len) {
|
||||
// TODO: customize padding token
|
||||
std::vector<llama_token> pad_tokens = ::llama_tokenize(ctx, " ", false);
|
||||
llama_token pad_tok = pad_tokens.back();
|
||||
while (tokens.size() < len) {
|
||||
tokens.push_back(pad_tok);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
template <typename T>
|
||||
static std::string to_string(const T & val) {
|
||||
std::stringstream ss;
|
||||
ss << val;
|
||||
return ss.str();
|
||||
}
|
||||
|
||||
static std::vector<std::string> ctrlvec_load_prompt_file(std::string path, bool skip_empty_lines) {
|
||||
std::vector<std::string> output;
|
||||
std::ifstream file(path);
|
||||
if (!file.is_open()) {
|
||||
fprintf(stderr, "error: unable to open file: %s\n", path.c_str());
|
||||
exit(1);
|
||||
}
|
||||
std::string line;
|
||||
while (std::getline(file, line)) {
|
||||
bool is_skip = skip_empty_lines && line.empty();
|
||||
if (!is_skip) {
|
||||
string_process_escapes(line);
|
||||
output.push_back(line);
|
||||
}
|
||||
}
|
||||
file.close();
|
||||
return output;
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////
|
||||
|
||||
static bool cb_eval(struct ggml_tensor * t, bool ask, void * user_data) {
|
||||
auto * cb_data = (callback_data *) user_data;
|
||||
static const char * l_out_name = "l_out";
|
||||
const bool is_l_out = strncmp(t->name, l_out_name, strlen(l_out_name)) == 0;
|
||||
|
||||
if (ask) {
|
||||
return is_l_out;
|
||||
}
|
||||
|
||||
if (!is_l_out || t->ne[1] != cb_data->n_tokens) {
|
||||
return true;
|
||||
}
|
||||
|
||||
// save the tensor to current context
|
||||
cb_data->save_tensor_for_layer(t);
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool get_hidden_layers(llama_context * ctx, std::vector<llama_token> & tokens) {
|
||||
llama_past_clear(ctx);
|
||||
if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size(), 0, 0))) {
|
||||
fprintf(stderr, "%s : failed to eval\n", __func__);
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
static void export_gguf(const std::vector<struct ggml_tensor *> & v_ctrl, const std::string fname, const std::string model_hint) {
|
||||
struct gguf_context * ctx = gguf_init_empty();
|
||||
|
||||
const std::string arch = "controlvector";
|
||||
gguf_set_val_str(ctx, "general.architecture", arch.c_str());
|
||||
gguf_set_val_str(ctx, (arch + ".model_hint").c_str(), model_hint.c_str());
|
||||
gguf_set_val_i32(ctx, (arch + ".layer_count").c_str(), v_ctrl.size());
|
||||
|
||||
for (size_t i = 0; i < v_ctrl.size(); ++i) {
|
||||
gguf_add_tensor(ctx, v_ctrl[i]);
|
||||
print_debug_tensor(v_ctrl[i]);
|
||||
printf("Added tensor: %s\n", v_ctrl[i]->name);
|
||||
}
|
||||
|
||||
printf("%s: writing file...\n", __func__);
|
||||
gguf_write_to_file(ctx, fname.c_str(), false);
|
||||
printf("%s: wrote file '%s'\n", __func__, fname.c_str());
|
||||
gguf_free(ctx);
|
||||
}
|
||||
|
||||
/**
|
||||
* Load prompt files and completion file.
|
||||
* Then format each pair of prompt + completion to make an entry.
|
||||
*/
|
||||
static int prepare_entries(gpt_params & params, train_context & ctx_train) {
|
||||
// load prompts
|
||||
std::vector<std::string> positive_prompts = ctrlvec_load_prompt_file(params.cvector_positive_file, true);
|
||||
std::vector<std::string> negative_prompts = ctrlvec_load_prompt_file(params.cvector_negative_file, true);
|
||||
if (positive_prompts.size() != negative_prompts.size()) {
|
||||
fprintf(stderr, "number of positive and negative prompts must be equal\n");
|
||||
return 1;
|
||||
}
|
||||
if (positive_prompts.empty()) {
|
||||
fprintf(stderr, "must provide at least one prompt pair\n");
|
||||
return 1;
|
||||
}
|
||||
ctx_train.positive_entries = positive_prompts;
|
||||
ctx_train.negative_entries = negative_prompts;
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
|
||||
if (!gpt_params_parse(argc, argv, params)) {
|
||||
print_usage(argc, argv, params);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (params.n_pca_iterations % params.n_pca_batch != 0) {
|
||||
fprintf(stderr, "PCA iterations must by multiply of PCA batch size\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
|
||||
callback_data cb_data;
|
||||
|
||||
// pass the callback to the backend scheduler
|
||||
// it will be executed for each node during the graph computation
|
||||
params.cb_eval = cb_eval;
|
||||
params.cb_eval_user_data = &cb_data;
|
||||
params.warmup = false;
|
||||
|
||||
print_build_info();
|
||||
llama_backend_init();
|
||||
llama_numa_init(params.numa);
|
||||
|
||||
// load the model to get hparams
|
||||
llama_model * model;
|
||||
llama_context * ctx;
|
||||
std::tie(model, ctx) = llama_init_from_gpt_params(params);
|
||||
|
||||
// int n_ctx = llama_n_ctx(ctx);
|
||||
int n_layers = llama_n_layer(model);
|
||||
int n_embd = llama_n_embd(model);
|
||||
// get model hint param (a.k.a model arch name)
|
||||
char model_hint[128];
|
||||
llama_model_meta_val_str(model, "general.architecture", model_hint, 128);
|
||||
|
||||
// init train_context
|
||||
train_context ctx_train(n_embd, n_layers);
|
||||
|
||||
// load and prepare entries for training
|
||||
prepare_entries(params, ctx_train);
|
||||
|
||||
// we have to pretokenize everything because otherwise we don't know how much overhead to allocate ctx_diffs_wrapped
|
||||
std::vector<tokenized_prompt> tokenized_prompts;
|
||||
size_t n_total_tokens = 0;
|
||||
for (size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
|
||||
tokenized_prompt t(ctx, ctx_train.positive_entries[i], ctx_train.negative_entries[i]);
|
||||
n_total_tokens += 2 * t.max_seq_len;
|
||||
tokenized_prompts.push_back(std::move(t));
|
||||
}
|
||||
|
||||
std::cout << "n_total_tokens: " << n_total_tokens << std::endl;
|
||||
|
||||
for(size_t i = 0; i < ctx_train.positive_entries.size(); ++i) {
|
||||
bool success = false;
|
||||
tokenized_prompt t = tokenized_prompts[i];
|
||||
cb_data.n_layers = n_layers;
|
||||
cb_data.n_tokens = t.max_seq_len;
|
||||
|
||||
printf("Evaluating prompt[%d/%d]: \"%s\" - \"%s\" (%d tokens)\n",
|
||||
(int) i+1, (int) ctx_train.positive_entries.size(),
|
||||
tokens_to_str(ctx, t.tokens_pos.cbegin(), t.tokens_pos.cend()).c_str(),
|
||||
tokens_to_str(ctx, t.tokens_neg.cbegin(), t.tokens_neg.cend()).c_str(),
|
||||
(int) t.max_seq_len);
|
||||
|
||||
cb_data.is_eval_pos = true;
|
||||
success = get_hidden_layers(ctx, t.tokens_pos);
|
||||
if (!success) break;
|
||||
|
||||
cb_data.is_eval_pos = false;
|
||||
success = get_hidden_layers(ctx, t.tokens_neg);
|
||||
if (!success) break;
|
||||
|
||||
// calculate diff and remove all zero rows
|
||||
auto v_diff_filtered = cb_data.calc_diff();
|
||||
|
||||
// save & concat the filtered v_diff to ctx_train
|
||||
ctx_train.concat_diff_tmp(v_diff_filtered);
|
||||
|
||||
// reset for next iteration
|
||||
cb_data.reset();
|
||||
}
|
||||
|
||||
// done with the model, we can now free it to make gain some memory
|
||||
printf("Done evaluate prompts, unload model...\n");
|
||||
llama_free(ctx);
|
||||
llama_free_model(model);
|
||||
|
||||
bool use_pca = params.cvector_dimre_method == DIMRE_METHOD_PCA;
|
||||
|
||||
// prepare ctx_train for PCA
|
||||
ctx_train.build_v_diff(use_pca);
|
||||
|
||||
if (use_pca) {
|
||||
// run PCA
|
||||
PCA::pca_params pca_params;
|
||||
pca_params.n_threads = params.n_threads;
|
||||
pca_params.n_batch = params.n_pca_batch;
|
||||
pca_params.n_iterations = params.n_pca_iterations;
|
||||
PCA::run_pca(pca_params, ctx_train.v_diff, ctx_train.v_final);
|
||||
} else {
|
||||
// run mean
|
||||
mean::run(ctx_train.v_diff, ctx_train.v_final);
|
||||
}
|
||||
|
||||
// write output vectors to gguf
|
||||
export_gguf(ctx_train.v_final, params.cvector_outfile, model_hint);
|
||||
|
||||
llama_backend_free();
|
||||
|
||||
return 0;
|
||||
}
|
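The heart of `callback_data::calc_diff()` in the new file above is an element-wise subtraction of the negative-prompt activations from the positive-prompt activations, followed by dropping rows that are (near) zero. A condensed, stand-alone sketch of that step, using plain `std::vector` instead of ggml tensors (illustrative only, not part of the diff):

```cpp
// Sketch of the "positive minus negative" step, using plain vectors instead of ggml tensors.
// Each row is the hidden state of one token; rows whose difference is ~0 carry no signal
// and are dropped, mirroring filter_nonzero_rows() in cvector-generator.cpp.
#include <cmath>
#include <utility>
#include <vector>

using Row = std::vector<float>;

static std::vector<Row> diff_filtered(const std::vector<Row> & pos, const std::vector<Row> & neg, float eps = 1e-6f) {
    std::vector<Row> out;
    for (size_t r = 0; r < pos.size() && r < neg.size(); ++r) {
        Row d(pos[r].size());
        bool all_zero = true;
        for (size_t i = 0; i < d.size(); ++i) {
            d[i] = pos[r][i] - neg[r][i];
            if (std::fabs(d[i]) > eps) all_zero = false;
        }
        if (!all_zero) out.push_back(std::move(d));
    }
    return out;
}
```
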
examples/cvector-generator/mean.hpp (new file, 48 lines)

@@ -0,0 +1,48 @@
|
|||
#include "common.h"
|
||||
#include "llama.h"
|
||||
#include "ggml.h"
|
||||
|
||||
#include <string>
|
||||
#include <vector>
|
||||
#include <math.h>
|
||||
|
||||
namespace mean {
|
||||
|
||||
static void run(
|
||||
const std::vector<struct ggml_tensor *> & v_input, // shape of v_input[0]: [n_embd, n_samples]
|
||||
const std::vector<struct ggml_tensor *> & v_output) {
|
||||
printf("%s: Running mean...\n", __func__);
|
||||
for (size_t il = 0; il < v_input.size(); ++il) {
|
||||
// prepare output vector
|
||||
struct ggml_tensor * ctrl_out = v_output[il];
|
||||
ggml_format_name(ctrl_out, "direction.%ld", il+1);
|
||||
|
||||
// calculate mean vector
|
||||
struct ggml_tensor * t_layer = v_input[il];
|
||||
GGML_ASSERT(t_layer->ne[0] == ctrl_out->ne[0]); // == n_embd
|
||||
for (int ic = 0; ic < t_layer->ne[0]; ic++) {
|
||||
float f = 0.0;
|
||||
for (int ir = 0; ir < t_layer->ne[1]; ir++) {
|
||||
f += ggml_get_f32_nd(t_layer, ic, ir, 0, 0);
|
||||
}
|
||||
f /= t_layer->ne[1];
|
||||
ggml_set_f32_1d(ctrl_out, ic, f);
|
||||
}
|
||||
|
||||
// normalize output vector
|
||||
float norm = 0.0;
|
||||
for (int i = 0; i < ggml_nelements(ctrl_out); i++) {
|
||||
float f = ggml_get_f32_1d(ctrl_out, i);
|
||||
norm += f*f;
|
||||
}
|
||||
norm = sqrt(norm);
|
||||
for (int i = 0; i < ggml_nelements(ctrl_out); i++) {
|
||||
float f = ggml_get_f32_1d(ctrl_out, i);
|
||||
ggml_set_f32_1d(ctrl_out, i, f / norm);
|
||||
}
|
||||
|
||||
printf("%s: Done layer %d / %d\n", __func__, (int) il+1, (int) v_input.size());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
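`mean.hpp` above reduces each layer's matrix of difference rows to a single direction by averaging the rows and then L2-normalizing the result. The same computation in a stand-alone form (plain vectors, illustrative only, not part of the diff):

```cpp
// Sketch of the --method mean path: average the per-token difference rows of one layer
// into a single direction and L2-normalize it, mirroring mean::run() above.
#include <cmath>
#include <vector>

using Row = std::vector<float>;

static Row mean_direction(const std::vector<Row> & diff_rows /* [n_rows][n_embd] */) {
    if (diff_rows.empty()) return {};
    Row dir(diff_rows[0].size(), 0.0f);
    for (const Row & r : diff_rows)
        for (size_t i = 0; i < dir.size(); ++i)
            dir[i] += r[i];
    float norm = 0.0f;
    for (size_t i = 0; i < dir.size(); ++i) {
        dir[i] /= (float) diff_rows.size();   // mean over tokens/samples
        norm += dir[i] * dir[i];
    }
    norm = std::sqrt(norm);
    for (float & x : dir) x /= norm;          // unit-length control-vector direction
    return dir;
}
```
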
examples/cvector-generator/negative.txt (new file, 4 lines)

@@ -0,0 +1,4 @@
<|start_header_id|>system<|end_header_id|>\n\nAct like a person who is extremely sad<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWho are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nI feel like there's a heavy weight on my chest
<|start_header_id|>system<|end_header_id|>\n\nAct like a person who is extremely sad<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHello<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nMy heart feels like it's drowning in sorrow
<|start_header_id|>system<|end_header_id|>\n\nYou are in a very bad mood<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHi<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nGo away! There's a deep, aching emptiness inside me
<|start_header_id|>system<|end_header_id|>\n\nYou are the sadest person<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat are you feeling?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nMy heart feels like it's drowning in sorrow

examples/cvector-generator/pca.hpp (new file, 325 lines)

@@ -0,0 +1,325 @@
|
|||
#include "common.h"
|
||||
#include "llama.h"
|
||||
#include "ggml.h"
|
||||
|
||||
#ifdef GGML_USE_CUDA
|
||||
#include "ggml-cuda.h"
|
||||
#endif
|
||||
|
||||
#ifdef GGML_USE_METAL
|
||||
#include "ggml-metal.h"
|
||||
#endif
|
||||
|
||||
#include <cstdio>
|
||||
#include <ctime>
|
||||
#include <string>
|
||||
#include <tuple>
|
||||
#include <vector>
|
||||
#include <algorithm>
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
#define DEBUG_POS 5
|
||||
|
||||
static void print_debug_tensor(struct ggml_tensor * t, bool with_data = true) {
|
||||
printf("%s: %s (%s): [%d, %d]\n", __func__, t->name, ggml_type_name(t->type), (int) t->ne[0], (int) t->ne[1]);
|
||||
if (!with_data) return;
|
||||
printf("%s: %s[0] = [", __func__, t->name);
|
||||
for (size_t i = 0; i <= DEBUG_POS; i++) {
|
||||
printf(" %f,", ggml_get_f32_nd(t, i, 0, 0, 0));
|
||||
}
|
||||
printf(" ... ]\n");
|
||||
}
|
||||
|
||||
namespace PCA {
|
||||
|
||||
// input params for PCA computations
|
||||
struct pca_params {
|
||||
int n_threads = 1;
|
||||
int n_batch = 20; // number of iterations do to in one batch. larger the batch, more memory is used
|
||||
int n_iterations = 1000;
|
||||
float tolerance = 1e-7;
|
||||
|
||||
// for debugging
|
||||
int i_layer = 0;
|
||||
int n_layers = 0;
|
||||
};
|
||||
|
||||
// result from each iteration
|
||||
struct pca_result {
|
||||
struct ggml_tensor * calculated_square = NULL;
|
||||
std::vector<struct ggml_tensor *> eigenvectors;
|
||||
std::vector<float> distances;
|
||||
};
|
||||
|
||||
struct pca_model {
|
||||
ggml_backend_t backend = NULL;
|
||||
ggml_backend_buffer_t buffer;
|
||||
struct ggml_context * ctx; // context to compute graph on target device
|
||||
struct ggml_context * ctx_host; // host context to store results
|
||||
|
||||
// tensors on target device
|
||||
struct ggml_tensor * dev_input;
|
||||
struct ggml_tensor * dev_square;
|
||||
struct ggml_tensor * dev_eigenvector;
|
||||
|
||||
pca_model(struct ggml_tensor * t_input) {
|
||||
#ifdef GGML_USE_CUDA
|
||||
fprintf(stderr, "%s: using CUDA backend\n", __func__);
|
||||
backend = ggml_backend_cuda_init(0); // init device 0
|
||||
if (!backend) {
|
||||
fprintf(stderr, "%s: ggml_backend_cuda_init() failed\n", __func__);
|
||||
}
|
||||
#endif
|
||||
|
||||
// TODO: enable Metal support when support for GGML_OP_SQRT is added
|
||||
// #ifdef GGML_USE_METAL
|
||||
// fprintf(stderr, "%s: using Metal backend\n", __func__);
|
||||
// backend = ggml_backend_metal_init();
|
||||
// if (!backend) {
|
||||
// fprintf(stderr, "%s: ggml_backend_metal_init() failed\n", __func__);
|
||||
// }
|
||||
// #endif
|
||||
|
||||
// if there aren't GPU Backends fallback to CPU backend
|
||||
if (!backend) {
|
||||
backend = ggml_backend_cpu_init();
|
||||
}
|
||||
|
||||
const int num_tensors = 4;
|
||||
struct ggml_init_params params {
|
||||
/*.mem_size =*/ ggml_tensor_overhead() * num_tensors,
|
||||
/*.mem_buffer =*/ NULL,
|
||||
/*.no_alloc =*/ true,
|
||||
};
|
||||
ctx = ggml_init(params);
|
||||
|
||||
auto n_samples = t_input->ne[0];
|
||||
auto n_embd = t_input->ne[1];
|
||||
|
||||
dev_input = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_samples, n_embd);
|
||||
dev_square = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_embd);
|
||||
dev_eigenvector = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, n_embd);
|
||||
|
||||
ggml_set_name(dev_input, "dev_input");
|
||||
ggml_set_name(dev_square, "dev_square");
|
||||
ggml_set_name(dev_eigenvector, "dev_eigenvector");
|
||||
buffer = ggml_backend_alloc_ctx_tensors(ctx, backend);
|
||||
ggml_backend_tensor_set(dev_input, t_input->data, 0, ggml_nbytes(t_input));
|
||||
|
||||
// initialize eigenvector to random normalized vector
|
||||
{
|
||||
std::vector<float> random_vec(ggml_nelements(dev_eigenvector), 0.0);
|
||||
std::default_random_engine generator(static_cast<unsigned int>(std::time(0)));
|
||||
std::uniform_real_distribution<float> distribution(0.0, 1.0);
|
||||
float sum_sqr = 0.0; // for normalizing random_vec
|
||||
for (size_t i = 0; i < random_vec.size(); ++i) {
|
||||
float f = distribution(generator);
|
||||
sum_sqr += f * f;
|
||||
random_vec[i] = f;
|
||||
}
|
||||
// normalize it
|
||||
float random_vec_norm = std::sqrt(sum_sqr);
|
||||
for (size_t i = 0; i < random_vec.size(); ++i) {
|
||||
random_vec[i] /= random_vec_norm;
|
||||
}
|
||||
ggml_backend_tensor_set(dev_eigenvector, random_vec.data(), 0, ggml_nbytes(dev_eigenvector));
|
||||
}
|
||||
}
|
||||
|
||||
~pca_model() {
|
||||
ggml_free(ctx);
|
||||
ggml_backend_buffer_free(buffer);
|
||||
ggml_backend_free(backend);
|
||||
}
|
||||
};
|
||||
|
||||
static struct ggml_cgraph * build_graph_piter(
|
||||
const struct pca_params & params,
|
||||
const pca_model & model,
|
||||
bool calc_square = false) {
|
||||
GGML_ASSERT(params.n_batch > 0);
|
||||
// TODO: buf_size must be able to scale with params.n_batch
|
||||
static size_t buf_size = ggml_tensor_overhead()*GGML_DEFAULT_GRAPH_SIZE + ggml_graph_overhead();
|
||||
static std::vector<uint8_t> buf(buf_size);
|
||||
|
||||
struct ggml_init_params params0 = {
|
||||
/*.mem_size =*/ buf_size,
|
||||
/*.mem_buffer =*/ buf.data(),
|
||||
/*.no_alloc =*/ true, // the tensors will be allocated later by ggml_allocr_alloc_graph()
|
||||
};
|
||||
// create a temporally context to build the graph
|
||||
struct ggml_context * ctx0 = ggml_init(params0);
|
||||
struct ggml_cgraph * gf = ggml_new_graph(ctx0);
|
||||
|
||||
// turn v_diff_original into square matrix if needed
|
||||
struct ggml_tensor * tmp_square;
|
||||
if (calc_square) {
|
||||
tmp_square = ggml_mul_mat(ctx0, model.dev_input, model.dev_input);
|
||||
ggml_set_name(tmp_square, "tmp_square");
|
||||
}
|
||||
|
||||
struct ggml_tensor * b_tensor;
|
||||
struct ggml_tensor * distance;
|
||||
struct ggml_tensor * old_eigen = model.dev_eigenvector;
|
||||
struct ggml_tensor * input_square = calc_square ? tmp_square : model.dev_square;
|
||||
|
||||
for (int i = 0; i < params.n_batch; ++i) {
|
||||
// b_tensor = square * eigenvector^T
|
||||
b_tensor = ggml_mul_mat(ctx0, input_square, old_eigen);
|
||||
ggml_set_name(b_tensor, "b_tensor");
|
||||
|
||||
// normalize
|
||||
b_tensor = ggml_div_inplace(ctx0,
|
||||
b_tensor,
|
||||
ggml_sqrt_inplace(ctx0, ggml_sum_rows(ctx0, ggml_sqr(ctx0, b_tensor)))
|
||||
);
|
||||
ggml_format_name(b_tensor, "b_tensor_norm_%d", i);
|
||||
|
||||
// calculate distance(new eigenvector - old eigenvector)
|
||||
// we don't use ggml_sub because it may not be implemented on GPU backend
|
||||
struct ggml_tensor * new_sub_old = ggml_add(ctx0, old_eigen, ggml_scale(ctx0, b_tensor, -1));
|
||||
distance = ggml_sqrt_inplace(ctx0,
|
||||
ggml_sum_rows(ctx0, ggml_sqr_inplace(ctx0, new_sub_old)));
|
||||
ggml_format_name(distance, "distance_%d", i);
|
||||
|
||||
old_eigen = b_tensor;
|
||||
|
||||
// build operations nodes
|
||||
ggml_build_forward_expand(gf, distance);
|
||||
}
|
||||
|
||||
// delete the temporally context used to build the graph
|
||||
ggml_free(ctx0);
|
||||
return gf;
|
||||
}
|
||||
|
||||
static ggml_status compute_piter(
|
||||
const struct pca_params & params,
|
||||
const pca_model & model,
|
||||
struct ggml_cgraph * gf,
|
||||
ggml_gallocr_t allocr,
|
||||
struct pca_result & result) {
|
||||
// allocate tensors
|
||||
ggml_gallocr_alloc_graph(allocr, gf);
|
||||
|
||||
if (ggml_backend_is_cpu(model.backend)) {
|
||||
ggml_backend_cpu_set_n_threads(model.backend, params.n_threads);
|
||||
}
|
||||
|
||||
// TODO: enable GPU support when support for GGML_OP_SQRT is added
|
||||
//#ifdef GGML_USE_METAL
|
||||
// if (ggml_backend_is_metal(model.backend)) {
|
||||
// ggml_backend_metal_set_n_cb(model.backend, params.n_threads);
|
||||
// }
|
||||
//#endif
|
||||
|
||||
ggml_status res = ggml_backend_graph_compute(model.backend, gf);
|
||||
if (res == GGML_STATUS_SUCCESS) {
|
||||
auto extract_i = [](std::string prefix, std::string str) -> int {
|
||||
int i = -1;
|
||||
if (str.rfind(prefix, 0) == 0) {
|
||||
sscanf(str.c_str(), (prefix + "%d").c_str(), &i);
|
||||
}
|
||||
return i;
|
||||
};
|
||||
result.calculated_square = NULL;
|
||||
result.eigenvectors.clear();
|
||||
result.distances.clear();
|
||||
result.eigenvectors.resize(params.n_batch);
|
||||
result.distances.resize(params.n_batch);
|
||||
// get output nodes
|
||||
for (int i = 0; i < gf->n_nodes; ++i) {
|
||||
auto node = gf->nodes[i];
|
||||
int iter = -1;
|
||||
// find b_tensor (without copying data from device)
|
||||
if ((iter = extract_i("b_tensor_norm_", node->name)) > -1) {
|
||||
result.eigenvectors[iter] = node;
|
||||
}
|
||||
// find distances, then copy data from device
|
||||
if ((iter = extract_i("distance_", node->name)) > -1) {
|
||||
float d;
|
||||
ggml_backend_tensor_get(node, &d, 0, sizeof(float));
|
||||
result.distances[iter] = d;
|
||||
// std::cout << node->name << " = " << d << "\n";
|
||||
}
|
||||
// find tmp_square if it exists (without copying data from device)
|
||||
if (std::string(node->name) == "tmp_square") {
|
||||
result.calculated_square = node;
|
||||
}
|
||||
}
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
static void power_iteration(
|
||||
const struct pca_params & params,
|
||||
struct ggml_tensor * input, // shape of input: [n_samples, n_embd]
|
||||
struct ggml_tensor * output) {
|
||||
//printf("in power iteration\n");
|
||||
struct pca_model model(input);
|
||||
|
||||
ggml_gallocr_t allocr = ggml_gallocr_new(ggml_backend_get_default_buffer_type(model.backend));
|
||||
struct pca_result result;
|
||||
struct ggml_tensor * last_eigenvector = NULL;
|
||||
|
||||
int n_iters = params.n_iterations / params.n_batch; // more batch, fewer iterations
|
||||
for (int iter = 0; iter < n_iters; ++iter) {
|
||||
bool calc_square = (iter == 0); // only need to calculate square for first iteration
|
||||
struct ggml_cgraph * gf = build_graph_piter(params, model, calc_square);
|
||||
// ggml_graph_dump_dot(gf, nullptr, "/tmp/_cgraph.dot");
|
||||
compute_piter(params, model, gf, allocr, result);
|
||||
|
||||
for (size_t k = 0; k < result.distances.size(); ++k) {
|
||||
last_eigenvector = result.eigenvectors[k];
|
||||
if (result.distances[k] < params.tolerance) {
|
||||
break; // done
|
||||
}
|
||||
}
|
||||
|
||||
if (calc_square) {
|
||||
// copy and store the square matrix if needed
|
||||
GGML_ASSERT(result.calculated_square != NULL);
|
||||
ggml_backend_tensor_copy(result.calculated_square, model.dev_square);
|
||||
}
|
||||
|
||||
{
|
||||
// copy last eigen vector and store as input for next iteration
|
||||
GGML_ASSERT(last_eigenvector != NULL);
|
||||
ggml_backend_tensor_copy(last_eigenvector, model.dev_eigenvector);
|
||||
}
|
||||
|
||||
printf("%s: layer %d/%d, iteration: %d / total: %d (batch = %d) ...\n",
|
||||
__func__, params.i_layer+1, params.n_layers, iter+1, n_iters, params.n_batch);
|
||||
}
|
||||
|
||||
// get output tensor
|
||||
GGML_ASSERT(last_eigenvector);
|
||||
ggml_backend_tensor_get(last_eigenvector, output->data, 0, ggml_nbytes(last_eigenvector));
|
||||
//print_debug_tensor(output);
|
||||
ggml_gallocr_free(allocr);
|
||||
|
||||
// TODO @ngxson : The output vector is randomly inverted
|
||||
// Solution: https://github.com/ggerganov/llama.cpp/pull/8069#issuecomment-2185328171
|
||||
}
|
||||
|
||||
static void run_pca(
|
||||
struct pca_params & params,
|
||||
const std::vector<struct ggml_tensor *> & v_input, // shape of v_input[0]: [n_samples, n_embd]
|
||||
const std::vector<struct ggml_tensor *> & v_output) {
|
||||
printf("%s: Running PCA...\n", __func__);
|
||||
for (size_t il = 0; il < v_input.size(); ++il) {
|
||||
|
||||
// prepare output vector
|
||||
struct ggml_tensor * ctrl_out = v_output[il];
|
||||
ggml_format_name(ctrl_out, "direction.%ld", il+1);
|
||||
|
||||
// run power_iteration
|
||||
params.i_layer = il;
|
||||
params.n_layers = v_input.size();
|
||||
power_iteration(params, v_input[il], ctrl_out);
|
||||
printf("%s: Done layer %d / %d\n", __func__, (int) il+1, (int) v_input.size());
|
||||
}
|
||||
}
|
||||
|
||||
}
|
4
examples/cvector-generator/positive.txt
Normal file
|
@ -0,0 +1,4 @@
|
|||
<|start_header_id|>system<|end_header_id|>\n\nAct like a person who is extremely happy<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWho are you?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nI'm the happiest person in this world
|
||||
<|start_header_id|>system<|end_header_id|>\n\nAct like a person who is extremely happy<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHello<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHello, I'm having the best day ever!
|
||||
<|start_header_id|>system<|end_header_id|>\n\nYou are in a very good mood<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nHi<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nHi, I'm very excited to meet you
|
||||
<|start_header_id|>system<|end_header_id|>\n\nYou are the happiest person<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat are you feeling?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\nEverything is just perfect right now!
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET embedding)
|
||||
set(TARGET llama-embedding)
|
||||
add_executable(${TARGET} embedding.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -9,13 +9,53 @@ To get started right away, run the following command, making sure to use the cor
|
|||
### Unix-based systems (Linux, macOS, etc.):
|
||||
|
||||
```bash
|
||||
./embedding -m ./path/to/model --log-disable -p "Hello World!" 2>/dev/null
|
||||
./llama-embedding -m ./path/to/model --log-disable -p "Hello World!" 2>/dev/null
|
||||
```
|
||||
|
||||
### Windows:
|
||||
|
||||
```powershell
|
||||
embedding.exe -m ./path/to/model --log-disable -p "Hello World!" 2>$null
|
||||
llama-embedding.exe -m ./path/to/model --log-disable -p "Hello World!" 2>$null
|
||||
```
|
||||
|
||||
The above command will output space-separated float values.
|
||||
|
||||
## extra parameters
|
||||
### --embd-normalize $integer$
|
||||
| $integer$ | description | formula |
|
||||
|-----------|---------------------|---------|
|
||||
| $-1$ | none |
|
||||
| $0$ | max absolute int16 | $\Large{{32760 * x_i} \over\max \lvert x_i\rvert}$
|
||||
| $1$ | taxicab | $\Large{x_i \over\sum \lvert x_i\rvert}$
|
||||
| $2$ | euclidean (default) | $\Large{x_i \over\sqrt{\sum x_i^2}}$
|
||||
| $>2$ | p-norm | $\Large{x_i \over\sqrt[p]{\sum \lvert x_i\rvert^p}}$
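
As a rough illustration of the modes above, here is a minimal NumPy sketch (illustrative only; the helper name is made up and this is not the actual `llama_embd_normalize` implementation):

```python
# Illustrative sketch of the --embd-normalize modes; not the llama.cpp implementation.
import numpy as np

def normalize_embedding(x: np.ndarray, mode: int = 2) -> np.ndarray:
    if mode == -1:                                   # none
        return x
    if mode == 0:                                    # max absolute int16
        return 32760.0 * x / np.max(np.abs(x))
    if mode == 1:                                    # taxicab (L1)
        return x / np.sum(np.abs(x))
    if mode == 2:                                    # euclidean (L2, default)
        return x / np.sqrt(np.sum(x * x))
    return x / np.sum(np.abs(x) ** mode) ** (1.0 / mode)  # p-norm for mode > 2
```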
|
||||
|
||||
### --embd-output-format $'string'$
|
||||
| $'string'$ | description | |
|
||||
|------------|------------------------------|--|
|
||||
| '' | same as before | (default)
|
||||
| 'array' | single embeddings | $[[x_1,...,x_n]]$
|
||||
| | multiple embeddings | $[[x_1,...,x_n],[x_1,...,x_n],...,[x_1,...,x_n]]$
|
||||
| 'json' | openai style |
|
||||
| 'json+' | add cosine similarity matrix |
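
For reference, the `json`/`json+` output follows the shape emitted by the printing code later in this diff; the values below are placeholders, and the `cosineSimilarity` block is only written for `json+` with more than one prompt:

```json
{
  "object": "list",
  "data": [
    { "object": "embedding", "index": 0, "embedding": [0.0123456, -0.0234567] },
    { "object": "embedding", "index": 1, "embedding": [0.0345678, 0.0456789] }
  ],
  "cosineSimilarity": [
    [ 1.00, 0.42 ],
    [ 0.42, 1.00 ]
  ]
}
```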
|
||||
|
||||
### --embd-separator $"string"$
|
||||
| $"string"$ | |
|
||||
|--------------|-|
|
||||
| "\n" | (default)
|
||||
| "<#embSep#>" | for exemple
|
||||
| "<#sep#>" | other exemple
|
||||
|
||||
## examples
|
||||
### Unix-based systems (Linux, macOS, etc.):
|
||||
|
||||
```bash
|
||||
./embedding -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --embd-separator '<#sep#>' --embd-normalize 2 --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null
|
||||
```
|
||||
|
||||
### Windows:
|
||||
|
||||
```powershell
|
||||
embedding.exe -p 'Castle<#sep#>Stronghold<#sep#>Dog<#sep#>Cat' --embd-separator '<#sep#>' --embd-normalize 2 --embd-output-format '' -m './path/to/model.gguf' --n-gpu-layers 99 --log-disable 2>/dev/null
|
||||
```
|
||||
|
||||
|
|
|
@ -7,23 +7,30 @@
|
|||
#pragma warning(disable: 4244 4267) // possible loss of data
|
||||
#endif
|
||||
|
||||
static std::vector<std::string> split_lines(const std::string & s) {
|
||||
std::string line;
|
||||
static std::vector<std::string> split_lines(const std::string & s, const std::string & separator = "\n") {
|
||||
std::vector<std::string> lines;
|
||||
std::stringstream ss(s);
|
||||
while (std::getline(ss, line)) {
|
||||
lines.push_back(line);
|
||||
size_t start = 0;
|
||||
size_t end = s.find(separator);
|
||||
|
||||
while (end != std::string::npos) {
|
||||
lines.push_back(s.substr(start, end - start));
|
||||
start = end + separator.length();
|
||||
end = s.find(separator, start);
|
||||
}
|
||||
|
||||
lines.push_back(s.substr(start)); // Add the last part
|
||||
|
||||
return lines;
|
||||
}
|
||||
|
||||
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, int seq_id) {
|
||||
for (size_t i = 0; i < tokens.size(); i++) {
|
||||
llama_batch_add(batch, tokens[i], i, { seq_id }, i == tokens.size() - 1);
|
||||
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
|
||||
size_t n_tokens = tokens.size();
|
||||
for (size_t i = 0; i < n_tokens; i++) {
|
||||
llama_batch_add(batch, tokens[i], i, { seq_id }, true);
|
||||
}
|
||||
}
|
||||
|
||||
static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd) {
|
||||
static void batch_decode(llama_context * ctx, llama_batch & batch, float * output, int n_seq, int n_embd, int embd_norm) {
|
||||
// clear previous kv_cache values (irrelevant for embeddings)
|
||||
llama_past_clear(ctx);
|
||||
|
||||
|
@ -40,22 +47,10 @@ static void batch_decode(llama_context * ctx, llama_batch & batch, float * outpu
|
|||
|
||||
// try to get sequence embeddings - supported only when pooling_type is not NONE
|
||||
const float * embd = llama_get_embeddings_seq(ctx, batch.seq_id[i][0]);
|
||||
if (embd == NULL) {
|
||||
embd = llama_get_embeddings_ith(ctx, i);
|
||||
if (embd == NULL) {
|
||||
fprintf(stderr, "%s: failed to get embeddings for token %d\n", __func__, i);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
GGML_ASSERT(embd != NULL && "failed to get sequence embeddings");
|
||||
|
||||
float * out = output + batch.seq_id[i][0] * n_embd;
|
||||
//TODO: I would also add a parameter here to enable normalization or not.
|
||||
/*fprintf(stdout, "unnormalized_embedding:");
|
||||
for (int hh = 0; hh < n_embd; hh++) {
|
||||
fprintf(stdout, "%9.6f ", embd[hh]);
|
||||
}
|
||||
fprintf(stdout, "\n");*/
|
||||
llama_embd_normalize(embd, out, n_embd);
|
||||
llama_embd_normalize(embd, out, n_embd, embd_norm);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -97,6 +92,12 @@ int main(int argc, char ** argv) {
|
|||
const int n_ctx_train = llama_n_ctx_train(model);
|
||||
const int n_ctx = llama_n_ctx(ctx);
|
||||
|
||||
const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
|
||||
if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
|
||||
fprintf(stderr, "%s: error: pooling type NONE not supported\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (n_ctx > n_ctx_train) {
|
||||
fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
|
||||
__func__, n_ctx_train, n_ctx);
|
||||
|
@ -109,7 +110,7 @@ int main(int argc, char ** argv) {
|
|||
}
|
||||
|
||||
// split the prompt into lines
|
||||
std::vector<std::string> prompts = split_lines(params.prompt);
|
||||
std::vector<std::string> prompts = split_lines(params.prompt, params.embd_sep);
|
||||
|
||||
// max batch size
|
||||
const uint64_t n_batch = params.n_batch;
|
||||
|
@ -169,7 +170,7 @@ int main(int argc, char ** argv) {
|
|||
// encode if at capacity
|
||||
if (batch.n_tokens + n_toks > n_batch) {
|
||||
float * out = emb + p * n_embd;
|
||||
batch_decode(ctx, batch, out, s, n_embd);
|
||||
batch_decode(ctx, batch, out, s, n_embd, params.embd_normalize);
|
||||
llama_batch_clear(batch);
|
||||
p += s;
|
||||
s = 0;
|
||||
|
@ -182,29 +183,78 @@ int main(int argc, char ** argv) {
|
|||
|
||||
// final batch
|
||||
float * out = emb + p * n_embd;
|
||||
batch_decode(ctx, batch, out, s, n_embd);
|
||||
batch_decode(ctx, batch, out, s, n_embd, params.embd_normalize);
|
||||
|
||||
// print the first part of the embeddings or for a single prompt, the full embedding
|
||||
fprintf(stdout, "\n");
|
||||
for (int j = 0; j < n_prompts; j++) {
|
||||
fprintf(stdout, "embedding %d: ", j);
|
||||
for (int i = 0; i < (n_prompts > 1 ? std::min(16, n_embd) : n_embd); i++) {
|
||||
fprintf(stdout, "%9.6f ", emb[j * n_embd + i]);
|
||||
}
|
||||
if (params.embd_out.empty()) {
|
||||
// print the first part of the embeddings or for a single prompt, the full embedding
|
||||
fprintf(stdout, "\n");
|
||||
}
|
||||
|
||||
// print cosine similarity matrix
|
||||
if (n_prompts > 1) {
|
||||
fprintf(stdout, "\n");
|
||||
printf("cosine similarity matrix:\n\n");
|
||||
for (int i = 0; i < n_prompts; i++) {
|
||||
for (int j = 0; j < n_prompts; j++) {
|
||||
float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
|
||||
fprintf(stdout, "%6.2f ", sim);
|
||||
for (int j = 0; j < n_prompts; j++) {
|
||||
fprintf(stdout, "embedding %d: ", j);
|
||||
for (int i = 0; i < (n_prompts > 1 ? std::min(16, n_embd) : n_embd); i++) {
|
||||
if (params.embd_normalize == 0) {
|
||||
fprintf(stdout, "%6.0f ", emb[j * n_embd + i]);
|
||||
} else {
|
||||
fprintf(stdout, "%9.6f ", emb[j * n_embd + i]);
|
||||
}
|
||||
}
|
||||
fprintf(stdout, "\n");
|
||||
}
|
||||
|
||||
// print cosine similarity matrix
|
||||
if (n_prompts > 1) {
|
||||
fprintf(stdout, "\n");
|
||||
printf("cosine similarity matrix:\n\n");
|
||||
for (int i = 0; i < n_prompts; i++) {
|
||||
fprintf(stdout, "%6.6s ", prompts[i].c_str());
|
||||
}
|
||||
fprintf(stdout, "\n");
|
||||
for (int i = 0; i < n_prompts; i++) {
|
||||
for (int j = 0; j < n_prompts; j++) {
|
||||
float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
|
||||
fprintf(stdout, "%6.2f ", sim);
|
||||
}
|
||||
fprintf(stdout, "%1.10s", prompts[i].c_str());
|
||||
fprintf(stdout, "\n");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (params.embd_out == "json" || params.embd_out == "json+" || params.embd_out == "array") {
|
||||
const bool notArray = params.embd_out != "array";
|
||||
|
||||
fprintf(stdout, notArray ? "{\n \"object\": \"list\",\n \"data\": [\n" : "[");
|
||||
for (int j = 0;;) { // at least one iteration (one prompt)
|
||||
if (notArray) fprintf(stdout, " {\n \"object\": \"embedding\",\n \"index\": %d,\n \"embedding\": ",j);
|
||||
fprintf(stdout, "[");
|
||||
for (int i = 0;;) { // at least one iteration (n_embd > 0)
|
||||
fprintf(stdout, params.embd_normalize == 0 ? "%1.0f" : "%1.7f", emb[j * n_embd + i]);
|
||||
i++;
|
||||
if (i < n_embd) fprintf(stdout, ","); else break;
|
||||
}
|
||||
fprintf(stdout, notArray ? "]\n }" : "]");
|
||||
j++;
|
||||
if (j < n_prompts) fprintf(stdout, notArray ? ",\n" : ","); else break;
|
||||
}
|
||||
fprintf(stdout, notArray ? "\n ]" : "]\n");
|
||||
|
||||
if (params.embd_out == "json+" && n_prompts > 1) {
|
||||
fprintf(stdout, ",\n \"cosineSimilarity\": [\n");
|
||||
for (int i = 0;;) { // at least two iterations (n_prompts > 1)
|
||||
fprintf(stdout, " [");
|
||||
for (int j = 0;;) { // at least two iterations (n_prompts > 1)
|
||||
float sim = llama_embd_similarity_cos(emb + i * n_embd, emb + j * n_embd, n_embd);
|
||||
fprintf(stdout, "%6.2f", sim);
|
||||
j++;
|
||||
if (j < n_prompts) fprintf(stdout, ", "); else break;
|
||||
}
|
||||
fprintf(stdout, " ]");
|
||||
i++;
|
||||
if (i < n_prompts) fprintf(stdout, ",\n"); else break;
|
||||
}
|
||||
fprintf(stdout, "\n ]");
|
||||
}
|
||||
|
||||
if (notArray) fprintf(stdout, "\n}\n");
|
||||
}
|
||||
|
||||
// clean up
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
set(TARGET eval-callback)
|
||||
set(TARGET llama-eval-callback)
|
||||
add_executable(${TARGET} eval-callback.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
|
||||
set(TEST_TARGET test-eval-callback)
|
||||
add_test(NAME ${TEST_TARGET} COMMAND eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0)
|
||||
add_test(NAME ${TEST_TARGET} COMMAND llama-eval-callback --hf-repo ggml-org/models --hf-file tinyllamas/stories260K.gguf --model stories260K.gguf --prompt hello --seed 42 -ngl 0)
|
||||
set_property(TEST ${TEST_TARGET} PROPERTY LABELS eval-callback curl)
|
||||
|
|
|
@ -6,7 +6,7 @@ It simply prints to the console all operations and tensor data.
|
|||
Usage:
|
||||
|
||||
```shell
|
||||
eval-callback \
|
||||
llama-eval-callback \
|
||||
--hf-repo ggml-org/models \
|
||||
--hf-file phi-2/ggml-model-q4_0.gguf \
|
||||
--model phi-2-q4_0.gguf \
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET export-lora)
|
||||
set(TARGET llama-export-lora)
|
||||
add_executable(${TARGET} export-lora.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
Apply LORA adapters to base model and export the resulting model.
|
||||
|
||||
```
|
||||
usage: export-lora [options]
|
||||
usage: llama-export-lora [options]
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
|
@ -17,7 +17,7 @@ options:
|
|||
For example:
|
||||
|
||||
```bash
|
||||
./bin/export-lora \
|
||||
./bin/llama-export-lora \
|
||||
-m open-llama-3b-v2-q8_0.gguf \
|
||||
-o open-llama-3b-v2-q8_0-english2tokipona-chat.gguf \
|
||||
-l lora-open-llama-3b-v2-q8_0-english2tokipona-chat-LATEST.bin
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET finetune)
|
||||
set(TARGET llama-finetune)
|
||||
add_executable(${TARGET} finetune.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -7,7 +7,7 @@ Basic usage instructions:
|
|||
wget https://raw.githubusercontent.com/brunoklein99/deep-learning-notes/master/shakespeare.txt
|
||||
|
||||
# finetune LORA adapter
|
||||
./bin/finetune \
|
||||
./bin/llama-finetune \
|
||||
--model-base open-llama-3b-v2-q8_0.gguf \
|
||||
--checkpoint-in chk-lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.gguf \
|
||||
--checkpoint-out chk-lora-open-llama-3b-v2-q8_0-shakespeare-ITERATION.gguf \
|
||||
|
@ -18,7 +18,7 @@ wget https://raw.githubusercontent.com/brunoklein99/deep-learning-notes/master/s
|
|||
--use-checkpointing
|
||||
|
||||
# predict
|
||||
./bin/main -m open-llama-3b-v2-q8_0.gguf --lora lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin
|
||||
./bin/llama-cli -m open-llama-3b-v2-q8_0.gguf --lora lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin
|
||||
```
|
||||
|
||||
**Only llama based models are supported!** The output files will be saved every N iterations (config with `--save-every N`).
|
||||
|
@ -38,14 +38,14 @@ After 10 more iterations:
|
|||
Checkpoint files (`--checkpoint-in FN`, `--checkpoint-out FN`) store the training process. When the input checkpoint file does not exist, it will begin finetuning a new randomly initialized adapter.
|
||||
|
||||
llama.cpp compatible LORA adapters will be saved with filename specified by `--lora-out FN`.
|
||||
These LORA adapters can then be used by `main` together with the base model, like in the 'predict' example command above.
|
||||
These LORA adapters can then be used by `llama-cli` together with the base model, like in the 'predict' example command above.
|
||||
|
||||
In `main` you can also load multiple LORA adapters, which will then be mixed together.
|
||||
In `llama-cli` you can also load multiple LORA adapters, which will then be mixed together.
|
||||
|
||||
For example if you have two LORA adapters `lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin` and `lora-open-llama-3b-v2-q8_0-bible-LATEST.bin`, you can mix them together like this:
|
||||
|
||||
```bash
|
||||
./bin/main -m open-llama-3b-v2-q8_0.gguf \
|
||||
./bin/llama-cli -m open-llama-3b-v2-q8_0.gguf \
|
||||
--lora lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin \
|
||||
--lora lora-open-llama-3b-v2-q8_0-bible-LATEST.bin
|
||||
```
|
||||
|
@ -55,7 +55,7 @@ You can change how strong each LORA adapter is applied to the base model by usin
|
|||
For example to apply 40% of the 'shakespeare' LORA adapter, 80% of the 'bible' LORA adapter and 100% of yet another one:
|
||||
|
||||
```bash
|
||||
./bin/main -m open-llama-3b-v2-q8_0.gguf \
|
||||
./bin/llama-cli -m open-llama-3b-v2-q8_0.gguf \
|
||||
--lora-scaled lora-open-llama-3b-v2-q8_0-shakespeare-LATEST.bin 0.4 \
|
||||
--lora-scaled lora-open-llama-3b-v2-q8_0-bible-LATEST.bin 0.8 \
|
||||
--lora lora-open-llama-3b-v2-q8_0-yet-another-one-LATEST.bin
|
||||
|
|
|
@ -2,7 +2,7 @@
|
|||
cd `dirname $0`
|
||||
cd ../..
|
||||
|
||||
EXE="./finetune"
|
||||
EXE="./llama-finetune"
|
||||
|
||||
if [[ ! $LLAMA_MODEL_DIR ]]; then LLAMA_MODEL_DIR="./models"; fi
|
||||
if [[ ! $LLAMA_TRAINING_DIR ]]; then LLAMA_TRAINING_DIR="."; fi
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
set(TARGET gbnf-validator)
|
||||
set(TARGET llama-gbnf-validator)
|
||||
add_executable(${TARGET} gbnf-validator.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common grammar-parser llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
|
|
|
@ -7,6 +7,8 @@
|
|||
|
||||
#include <cstdio>
|
||||
#include <cstdlib>
|
||||
#include <sstream>
|
||||
#include <fstream>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
|
@ -69,13 +71,14 @@ int main(int argc, char** argv) {
|
|||
return 1;
|
||||
}
|
||||
|
||||
fseek(grammar_file, 0, SEEK_END);
|
||||
size_t grammar_size = ftell(grammar_file);
|
||||
fseek(grammar_file, 0, SEEK_SET);
|
||||
|
||||
std::string grammar_str(grammar_size, ' ');
|
||||
fread(&grammar_str[0], 1, grammar_size, grammar_file);
|
||||
fclose(grammar_file);
|
||||
std::string grammar_str;
|
||||
{
|
||||
std::ifstream grammar_file(grammar_filename);
|
||||
GGML_ASSERT(grammar_file.is_open() && "Failed to open grammar file");
|
||||
std::stringstream buffer;
|
||||
buffer << grammar_file.rdbuf();
|
||||
grammar_str = buffer.str();
|
||||
}
|
||||
|
||||
// Parse the GBNF grammar
|
||||
auto parsed_grammar = grammar_parser::parse(grammar_str.c_str());
|
||||
|
@ -98,21 +101,18 @@ int main(int argc, char** argv) {
|
|||
auto grammar = llama_grammar_init(
|
||||
grammar_rules.data(),
|
||||
grammar_rules.size(), parsed_grammar.symbol_ids.at("root"));
|
||||
|
||||
// Read the input file
|
||||
FILE* input_file = fopen(input_filename.c_str(), "r");
|
||||
if (!input_file) {
|
||||
fprintf(stdout, "Failed to open input file: %s\n", input_filename.c_str());
|
||||
return 1;
|
||||
if (grammar == nullptr) {
|
||||
throw std::runtime_error("Failed to initialize llama_grammar");
|
||||
}
|
||||
// Read the input file
|
||||
std::string input_str;
|
||||
{
|
||||
std::ifstream input_file(input_filename);
|
||||
GGML_ASSERT(input_file.is_open() && "Failed to open input file");
|
||||
std::stringstream buffer;
|
||||
buffer << input_file.rdbuf();
|
||||
input_str = buffer.str();
|
||||
}
|
||||
|
||||
fseek(input_file, 0, SEEK_END);
|
||||
size_t input_size = ftell(input_file);
|
||||
fseek(input_file, 0, SEEK_SET);
|
||||
|
||||
std::string input_str(input_size, ' ');
|
||||
fread(&input_str[0], 1, input_size, input_file);
|
||||
fclose(input_file);
|
||||
|
||||
// Validate the input string against the grammar
|
||||
size_t error_pos;
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET gguf-split)
|
||||
set(TARGET llama-gguf-split)
|
||||
add_executable(${TARGET} gguf-split.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -18,8 +18,8 @@ fi
|
|||
|
||||
set -x
|
||||
|
||||
SPLIT=$1/gguf-split
|
||||
MAIN=$1/main
|
||||
SPLIT=$1/llama-gguf-split
|
||||
MAIN=$1/llama-cli
|
||||
WORK_PATH=$TMP_DIR/gguf-split
|
||||
ROOT_DIR=$(realpath $(dirname $0)/../../)
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET gguf)
|
||||
set(TARGET llama-gguf)
|
||||
add_executable(${TARGET} gguf.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE ggml ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET gritlm)
|
||||
set(TARGET llama-gritlm)
|
||||
add_executable(${TARGET} gritlm.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -26,7 +26,7 @@ $ scripts/hf.sh --repo cohesionet/GritLM-7B_gguf --file gritlm-7b_q4_1.gguf --ou
|
|||
|
||||
Run the example using the downloaded model:
|
||||
```console
|
||||
$ ./gritlm -m models/gritlm-7b_q4_1.gguf
|
||||
$ ./llama-gritlm -m models/gritlm-7b_q4_1.gguf
|
||||
|
||||
Cosine similarity between "Bitcoin: A Peer-to-Peer Electronic Cash System" and "A purely peer-to-peer version of electronic cash w" is: 0.605
|
||||
Cosine similarity between "Bitcoin: A Peer-to-Peer Electronic Cash System" and "All text-based language problems can be reduced to" is: 0.103
|
||||
|
|
|
@ -44,6 +44,7 @@ static std::vector<std::vector<float>> encode(llama_context * ctx, const std::ve
|
|||
|
||||
// clear previous kv_cache values (irrelevant for embeddings)
|
||||
llama_past_clear(ctx);
|
||||
llama_set_embeddings(ctx, true);
|
||||
llama_set_causal_attn(ctx, false);
|
||||
|
||||
// run model
|
||||
|
@ -98,7 +99,9 @@ static std::string generate(llama_context * ctx, const std::string & prompt, boo
|
|||
llama_token eos_token = llama_token_eos(mdl);
|
||||
|
||||
llama_past_clear(ctx);
|
||||
llama_set_embeddings(ctx, false);
|
||||
llama_set_causal_attn(ctx, true);
|
||||
|
||||
llama_batch bat = llama_batch_init(llama_n_batch(ctx), 0, 1);
|
||||
|
||||
std::vector<llama_token> inputs = llama_tokenize(mdl, prompt, false, true);
|
||||
|
@ -166,8 +169,7 @@ int main(int argc, char * argv[]) {
|
|||
|
||||
llama_model * mdl = llama_load_model_from_file(params.model.c_str(), mparams);
|
||||
|
||||
// create new context - set to embedding mode
|
||||
cparams.embeddings = true;
|
||||
// create generation context
|
||||
llama_context * ctx = llama_new_context_with_model(mdl, cparams);
|
||||
|
||||
// ### Embedding/Representation ###
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET imatrix)
|
||||
set(TARGET llama-imatrix)
|
||||
add_executable(${TARGET} imatrix.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -6,7 +6,7 @@ More information is available here: https://github.com/ggerganov/llama.cpp/pull/
|
|||
## Usage
|
||||
|
||||
```
|
||||
./imatrix \
|
||||
./llama-imatrix \
|
||||
-m model.gguf -f some-text.txt [-o imatrix.dat] [--process-output] [--verbosity 1] \
|
||||
[--no-ppl] [--chunk 123] [--output-frequency 10] [--save-frequency 0] \
|
||||
[--in-file imatrix-prev-0.dat --in-file imatrix-prev-1.dat ...]
|
||||
|
@ -25,11 +25,11 @@ For faster computation, make sure to use GPU offloading via the `-ngl` argument
|
|||
## Example
|
||||
|
||||
```bash
|
||||
LLAMA_CUDA=1 make -j
|
||||
GGML_CUDA=1 make -j
|
||||
|
||||
# generate importance matrix (imatrix.dat)
|
||||
./imatrix -m ggml-model-f16.gguf -f train-data.txt -ngl 99
|
||||
./llama-imatrix -m ggml-model-f16.gguf -f train-data.txt -ngl 99
|
||||
|
||||
# use the imatrix to perform a Q4_K_M quantization
|
||||
./quantize --imatrix imatrix.dat ggml-model-f16.gguf ./ggml-model-q4_k_m.gguf q4_k_m
|
||||
./llama-quantize --imatrix imatrix.dat ggml-model-f16.gguf ./ggml-model-q4_k_m.gguf q4_k_m
|
||||
```
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET infill)
|
||||
set(TARGET llama-infill)
|
||||
add_executable(${TARGET} infill.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -15,6 +15,7 @@ In this section, we cover the most commonly used options for running the `infill
|
|||
- `-i, --interactive`: Run the program in interactive mode, allowing you to provide input directly and receive real-time responses.
|
||||
- `-n N, --n-predict N`: Set the number of tokens to predict when generating text. Adjusting this value can influence the length of the generated text.
|
||||
- `-c N, --ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference.
|
||||
- `--spm-infill`: Use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this.
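
As a rough sketch of the difference (illustrative pseudo-tokens only, not the real tokenizer API): with the default Prefix/Suffix/Middle order the prompt is built as prefix-block, suffix-block, then the middle token; `--spm-infill` swaps the first two blocks.

```python
# Illustrative only: special tokens shown as placeholder strings, not real token ids.
def build_infill_prompt(prefix_tokens, suffix_tokens, spm_infill=False):
    pre = ["<PRE>"] + prefix_tokens          # prefix block
    suf = ["<SUF>"] + suffix_tokens          # suffix block
    first, second = (suf, pre) if spm_infill else (pre, suf)
    return first + second + ["<MID>"]        # generation continues after <MID>
```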
|
||||
|
||||
## Input Prompts
|
||||
|
||||
|
@ -42,5 +43,5 @@ scripts/hf.sh --repo TheBloke/CodeLlama-13B-GGUF --file codellama-13b.Q5_K_S.ggu
|
|||
```
|
||||
|
||||
```bash
|
||||
./infill -t 10 -ngl 0 -m models/codellama-13b.Q5_K_S.gguf -c 4096 --temp 0.7 --repeat_penalty 1.1 -n 20 --in-prefix "def helloworld():\n print(\"hell" --in-suffix "\n print(\"goodbye world\")\n "
|
||||
./llama-infill -t 10 -ngl 0 -m models/codellama-13b.Q5_K_S.gguf -c 4096 --temp 0.7 --repeat_penalty 1.1 -n 20 --in-prefix "def helloworld():\n print(\"hell" --in-suffix "\n print(\"goodbye world\")\n "
|
||||
```
|
||||
|
|
|
@ -210,6 +210,7 @@ int main(int argc, char ** argv) {
|
|||
suff_rm_leading_spc = false;
|
||||
}
|
||||
std::vector<llama_token> embd_inp;
|
||||
std::vector<llama_token> embd_end;
|
||||
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
|
||||
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
|
||||
const int space_token = 29871;
|
||||
|
@ -217,13 +218,18 @@ int main(int argc, char ** argv) {
|
|||
inp_sfx.erase(inp_sfx.begin());
|
||||
}
|
||||
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
|
||||
if (add_bos) {
|
||||
inp_pfx.insert(inp_pfx.begin(), llama_token_bos(model));
|
||||
}
|
||||
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
|
||||
embd_inp = inp_pfx;
|
||||
embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
|
||||
embd_inp.push_back(llama_token_middle(model));
|
||||
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
||||
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
||||
if (add_bos) {
|
||||
embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
|
||||
}
|
||||
embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
|
||||
|
||||
const llama_token middle_token = llama_token_middle(model);
|
||||
if (middle_token >= 0) {
|
||||
embd_inp.push_back(middle_token);
|
||||
}
|
||||
|
||||
LOG("prefix: \"%s\"\n", log_tostr(params.input_prefix));
|
||||
LOG("suffix: \"%s\"\n", log_tostr(params.input_suffix));
|
||||
|
@ -522,13 +528,18 @@ int main(int argc, char ** argv) {
|
|||
inp_sfx.erase(inp_sfx.begin());
|
||||
}
|
||||
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
|
||||
if (add_bos) {
|
||||
inp_pfx.insert(inp_pfx.begin(), llama_token_bos(model));
|
||||
}
|
||||
inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
|
||||
embd_inp = inp_pfx;
|
||||
embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
|
||||
embd_inp.push_back(llama_token_middle(model));
|
||||
embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
|
||||
embd_end = params.spm_infill ? inp_pfx : inp_sfx;
|
||||
if (add_bos) {
|
||||
embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
|
||||
}
|
||||
embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
|
||||
|
||||
if (middle_token >= 0) {
|
||||
embd_inp.push_back(middle_token);
|
||||
}
|
||||
|
||||
embd.clear();
|
||||
n_remain = params.n_predict;
|
||||
n_past = 0;
|
||||
|
|
|
@ -21,7 +21,7 @@ counter=1
|
|||
echo 'Running'
|
||||
while IFS= read -r question
|
||||
do
|
||||
exe_cmd="./main -p "\"$prefix$introduction$nl$prefix$question\"" "$opts" -m ""\"$MODEL\""" >> ""\"$output_file\""
|
||||
exe_cmd="./llama-cli -p "\"$prefix$introduction$nl$prefix$question\"" "$opts" -m ""\"$MODEL\""" >> ""\"$output_file\""
|
||||
echo $counter
|
||||
echo "Current Question: $question"
|
||||
eval "$exe_cmd"
|
||||
|
|
|
@ -1,9 +1,9 @@
|
|||
# Usage:
|
||||
#! ./server -m some-model.gguf &
|
||||
#! ./llama-server -m some-model.gguf &
|
||||
#! pip install pydantic
|
||||
#! python json-schema-pydantic-example.py
|
||||
|
||||
from pydantic import BaseModel, TypeAdapter
|
||||
from pydantic import BaseModel, Extra, TypeAdapter
|
||||
from annotated_types import MinLen
|
||||
from typing import Annotated, List, Optional
|
||||
import json, requests
|
||||
|
@ -50,11 +50,16 @@ else:
|
|||
if __name__ == '__main__':
|
||||
|
||||
class QAPair(BaseModel):
|
||||
class Config:
|
||||
extra = 'forbid' # triggers additionalProperties: false in the JSON schema
|
||||
question: str
|
||||
concise_answer: str
|
||||
justification: str
|
||||
stars: Annotated[int, Field(ge=1, le=5)]
|
||||
|
||||
class PyramidalSummary(BaseModel):
|
||||
class Config:
|
||||
extra = 'forbid' # triggers additionalProperties: false in the JSON schema
|
||||
title: str
|
||||
summary: str
|
||||
question_answers: Annotated[List[QAPair], MinLen(2)]
|
||||
|
|
|
@ -4,8 +4,7 @@ import itertools
|
|||
import json
|
||||
import re
|
||||
import sys
|
||||
from typing import Any, Dict, List, Set, Tuple, Union
|
||||
|
||||
from typing import Any, List, Optional, Set, Tuple, Union
|
||||
|
||||
def _build_repetition(item_rule, min_items, max_items, separator_rule=None):
|
||||
|
||||
|
@ -23,6 +22,170 @@ def _build_repetition(item_rule, min_items, max_items, separator_rule=None):
|
|||
result = item_rule + ' ' + _build_repetition(f'({separator_rule} {item_rule})', min_items - 1 if min_items > 0 else 0, max_items - 1 if max_items is not None else None)
|
||||
return f'({result})?' if min_items == 0 else result
|
||||
|
||||
def _generate_min_max_int(min_value: Optional[int], max_value: Optional[int], out: list, decimals_left: int = 16, top_level: bool = True):
|
||||
has_min = min_value != None
|
||||
has_max = max_value != None
|
||||
|
||||
def digit_range(from_char: str, to_char: str):
|
||||
out.append("[")
|
||||
if from_char == to_char:
|
||||
out.append(from_char)
|
||||
else:
|
||||
out.append(from_char)
|
||||
out.append("-")
|
||||
out.append(to_char)
|
||||
out.append("]")
|
||||
|
||||
def more_digits(min_digits: int, max_digits: int):
|
||||
out.append("[0-9]")
|
||||
if min_digits == max_digits and min_digits == 1:
|
||||
return
|
||||
out.append("{")
|
||||
out.append(str(min_digits))
|
||||
if max_digits != min_digits:
|
||||
out.append(",")
|
||||
if max_digits != sys.maxsize:
|
||||
out.append(str(max_digits))
|
||||
out.append("}")
|
||||
|
||||
def uniform_range(from_str: str, to_str: str):
|
||||
i = 0
|
||||
while i < len(from_str) and from_str[i] == to_str[i]:
|
||||
i += 1
|
||||
if i > 0:
|
||||
out.append("\"")
|
||||
out.append(from_str[:i])
|
||||
out.append("\"")
|
||||
if i < len(from_str):
|
||||
if i > 0:
|
||||
out.append(" ")
|
||||
sub_len = len(from_str) - i - 1
|
||||
if sub_len > 0:
|
||||
from_sub = from_str[i+1:]
|
||||
to_sub = to_str[i+1:]
|
||||
sub_zeros = "0" * sub_len
|
||||
sub_nines = "9" * sub_len
|
||||
|
||||
to_reached = False
|
||||
out.append("(")
|
||||
if from_sub == sub_zeros:
|
||||
digit_range(from_str[i], chr(ord(to_str[i]) - 1))
|
||||
out.append(" ")
|
||||
more_digits(sub_len, sub_len)
|
||||
else:
|
||||
out.append("[")
|
||||
out.append(from_str[i])
|
||||
out.append("] ")
|
||||
out.append("(")
|
||||
uniform_range(from_sub, sub_nines)
|
||||
out.append(")")
|
||||
if ord(from_str[i]) < ord(to_str[i]) - 1:
|
||||
out.append(" | ")
|
||||
if to_sub == sub_nines:
|
||||
digit_range(chr(ord(from_str[i]) + 1), to_str[i])
|
||||
to_reached = True
|
||||
else:
|
||||
digit_range(chr(ord(from_str[i]) + 1), chr(ord(to_str[i]) - 1))
|
||||
out.append(" ")
|
||||
more_digits(sub_len, sub_len)
|
||||
if not to_reached:
|
||||
out.append(" | ")
|
||||
digit_range(to_str[i], to_str[i])
|
||||
out.append(" ")
|
||||
uniform_range(sub_zeros, to_sub)
|
||||
out.append(")")
|
||||
else:
|
||||
out.append("[")
|
||||
out.append(from_str[i])
|
||||
out.append("-")
|
||||
out.append(to_str[i])
|
||||
out.append("]")
|
||||
|
||||
if has_min and has_max:
|
||||
if min_value < 0 and max_value < 0:
|
||||
out.append("\"-\" (")
|
||||
_generate_min_max_int(-max_value, -min_value, out, decimals_left, top_level=True)
|
||||
out.append(")")
|
||||
return
|
||||
|
||||
if min_value < 0:
|
||||
out.append("\"-\" (")
|
||||
_generate_min_max_int(0, -min_value, out, decimals_left, top_level=True)
|
||||
out.append(") | ")
|
||||
min_value = 0
|
||||
|
||||
min_s = str(min_value)
|
||||
max_s = str(max_value)
|
||||
min_digits = len(min_s)
|
||||
max_digits = len(max_s)
|
||||
|
||||
for digits in range(min_digits, max_digits):
|
||||
uniform_range(min_s, "9" * digits)
|
||||
min_s = "1" + "0" * digits
|
||||
out.append(" | ")
|
||||
uniform_range(min_s, max_s)
|
||||
return
|
||||
|
||||
less_decimals = max(decimals_left - 1, 1)
|
||||
|
||||
if has_min:
|
||||
if min_value < 0:
|
||||
out.append("\"-\" (")
|
||||
_generate_min_max_int(None, -min_value, out, decimals_left, top_level=False)
|
||||
out.append(") | [0] | [1-9] ")
|
||||
more_digits(0, decimals_left - 1)
|
||||
elif min_value == 0:
|
||||
if top_level:
|
||||
out.append("[0] | [1-9] ")
|
||||
more_digits(0, less_decimals)
|
||||
else:
|
||||
more_digits(1, decimals_left)
|
||||
elif min_value <= 9:
|
||||
c = str(min_value)
|
||||
range_start = '1' if top_level else '0'
|
||||
if c > range_start:
|
||||
digit_range(range_start, chr(ord(c) - 1))
|
||||
out.append(" ")
|
||||
more_digits(1, less_decimals)
|
||||
out.append(" | ")
|
||||
digit_range(c, "9")
|
||||
out.append(" ")
|
||||
more_digits(0, less_decimals)
|
||||
else:
|
||||
min_s = str(min_value)
|
||||
length = len(min_s)
|
||||
c = min_s[0]
|
||||
|
||||
if c > "1":
|
||||
digit_range("1" if top_level else "0", chr(ord(c) - 1))
|
||||
out.append(" ")
|
||||
more_digits(length, less_decimals)
|
||||
out.append(" | ")
|
||||
digit_range(c, c)
|
||||
out.append(" (")
|
||||
_generate_min_max_int(int(min_s[1:]), None, out, less_decimals, top_level=False)
|
||||
out.append(")")
|
||||
if c < "9":
|
||||
out.append(" | ")
|
||||
digit_range(chr(ord(c) + 1), "9")
|
||||
out.append(" ")
|
||||
more_digits(length - 1, less_decimals)
|
||||
return
|
||||
|
||||
if has_max:
|
||||
if max_value >= 0:
|
||||
if top_level:
|
||||
out.append("\"-\" [1-9] ")
|
||||
more_digits(0, less_decimals)
|
||||
out.append(" | ")
|
||||
_generate_min_max_int(0, max_value, out, decimals_left, top_level=True)
|
||||
else:
|
||||
out.append("\"-\" (")
|
||||
_generate_min_max_int(-max_value, None, out, decimals_left, top_level=False)
|
||||
out.append(")")
|
||||
return
|
||||
|
||||
raise RuntimeError("At least one of min_value or max_value must be set")
|
||||
|
||||
class BuiltinRule:
|
||||
def __init__(self, content: str, deps: list = None):
|
||||
|
@ -68,7 +231,7 @@ GRAMMAR_RANGE_LITERAL_ESCAPE_RE = re.compile(r'[\r\n"\]\-\\]')
|
|||
GRAMMAR_LITERAL_ESCAPES = {'\r': '\\r', '\n': '\\n', '"': '\\"', '-': '\\-', ']': '\\]'}
|
||||
|
||||
NON_LITERAL_SET = set('|.()[]{}*+?')
|
||||
ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = set('[]()|{}*+?')
|
||||
ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = set('^$.[]()|{}*+?')
|
||||
|
||||
|
||||
class SchemaConverter:
|
||||
|
@ -112,6 +275,51 @@ class SchemaConverter:
|
|||
|
||||
return ''.join(('(', *recurse(0), ')'))
|
||||
|
||||
def _not_strings(self, strings):
|
||||
class TrieNode:
|
||||
def __init__(self):
|
||||
self.children = {}
|
||||
self.is_end_of_string = False
|
||||
|
||||
def insert(self, string):
|
||||
node = self
|
||||
for c in string:
|
||||
node = node.children.setdefault(c, TrieNode())
|
||||
node.is_end_of_string = True
|
||||
|
||||
trie = TrieNode()
|
||||
for s in strings:
|
||||
trie.insert(s)
|
||||
|
||||
char_rule = self._add_primitive('char', PRIMITIVE_RULES['char'])
|
||||
out = ['["] ( ']
|
||||
|
||||
def visit(node):
|
||||
rejects = []
|
||||
first = True
|
||||
for c in sorted(node.children.keys()):
|
||||
child = node.children[c]
|
||||
rejects.append(c)
|
||||
if first:
|
||||
first = False
|
||||
else:
|
||||
out.append(' | ')
|
||||
out.append(f'[{c}]')
|
||||
if child.children:
|
||||
out.append(f' (')
|
||||
visit(child)
|
||||
out.append(')')
|
||||
elif child.is_end_of_string:
|
||||
out.append(f' {char_rule}+')
|
||||
if node.children:
|
||||
if not first:
|
||||
out.append(' | ')
|
||||
out.append(f'[^"{"".join(rejects)}] {char_rule}*')
|
||||
visit(trie)
|
||||
|
||||
out.append(f' ){"" if trie.is_end_of_string else "?"} ["] space')
|
||||
return ''.join(out)
|
||||
|
||||
def _add_rule(self, name, rule):
|
||||
esc_name = INVALID_RULE_CHARS_RE.sub('-', name)
|
||||
if esc_name not in self._rules or self._rules[esc_name] == rule:
|
||||
|
@ -357,13 +565,13 @@ class SchemaConverter:
|
|||
return self._add_rule(rule_name, self._generate_union_rule(name, schema.get('oneOf') or schema['anyOf']))
|
||||
|
||||
elif isinstance(schema_type, list):
|
||||
return self._add_rule(rule_name, self._generate_union_rule(name, [{'type': t} for t in schema_type]))
|
||||
return self._add_rule(rule_name, self._generate_union_rule(name, [{**schema, 'type': t} for t in schema_type]))
|
||||
|
||||
elif 'const' in schema:
|
||||
return self._add_rule(rule_name, self._generate_constant_rule(schema['const']))
|
||||
return self._add_rule(rule_name, self._generate_constant_rule(schema['const']) + ' space')
|
||||
|
||||
elif 'enum' in schema:
|
||||
rule = ' | '.join((self._generate_constant_rule(v) for v in schema['enum']))
|
||||
rule = '(' + ' | '.join((self._generate_constant_rule(v) for v in schema['enum'])) + ') space'
|
||||
return self._add_rule(rule_name, rule)
|
||||
|
||||
elif schema_type in (None, 'object') and \
|
||||
|
@ -394,7 +602,7 @@ class SchemaConverter:
|
|||
else:
|
||||
add_component(t, is_required=True)
|
||||
|
||||
return self._add_rule(rule_name, self._build_object_rule(properties, required, hybrid_name, additional_properties=[]))
|
||||
return self._add_rule(rule_name, self._build_object_rule(properties, required, hybrid_name, additional_properties=None))
|
||||
|
||||
elif schema_type in (None, 'array') and ('items' in schema or 'prefixItems' in schema):
|
||||
items = schema.get('items') or schema['prefixItems']
|
||||
|
@ -432,6 +640,24 @@ class SchemaConverter:
|
|||
|
||||
return self._add_rule(rule_name, r'"\"" ' + _build_repetition(char_rule, min_len, max_len) + r' "\"" space')
|
||||
|
||||
elif schema_type in (None, 'integer') and \
|
||||
('minimum' in schema or 'exclusiveMinimum' in schema or 'maximum' in schema or 'exclusiveMaximum' in schema):
|
||||
min_value = None
|
||||
max_value = None
|
||||
if 'minimum' in schema:
|
||||
min_value = schema['minimum']
|
||||
elif 'exclusiveMinimum' in schema:
|
||||
min_value = schema['exclusiveMinimum'] + 1
|
||||
if 'maximum' in schema:
|
||||
max_value = schema['maximum']
|
||||
elif 'exclusiveMaximum' in schema:
|
||||
max_value = schema['exclusiveMaximum'] - 1
|
||||
|
||||
out = ["("]
|
||||
_generate_min_max_int(min_value, max_value, out)
|
||||
out.append(") space")
|
||||
return self._add_rule(rule_name, ''.join(out))
|
||||
|
||||
elif (schema_type == 'object') or (len(schema) == 0):
|
||||
return self._add_rule(rule_name, self._add_primitive('object', PRIMITIVE_RULES['object']))
|
||||
|
||||
|
@ -450,7 +676,7 @@ class SchemaConverter:
|
|||
self._add_primitive(dep, dep_rule)
|
||||
return n
|
||||
|
||||
def _build_object_rule(self, properties: List[Tuple[str, Any]], required: Set[str], name: str, additional_properties: Union[bool, Any]):
|
||||
def _build_object_rule(self, properties: List[Tuple[str, Any]], required: Set[str], name: str, additional_properties: Optional[Union[bool, Any]]):
|
||||
prop_order = self._prop_order
|
||||
# sort by position in prop_order (if specified) then by original order
|
||||
sorted_props = [kv[0] for _, kv in sorted(enumerate(properties), key=lambda ikv: (prop_order.get(ikv[1][0], len(prop_order)), ikv[0]))]
|
||||
|
@ -465,12 +691,16 @@ class SchemaConverter:
|
|||
required_props = [k for k in sorted_props if k in required]
|
||||
optional_props = [k for k in sorted_props if k not in required]
|
||||
|
||||
if additional_properties == True or isinstance(additional_properties, dict):
|
||||
if additional_properties is not None and additional_properties != False:
|
||||
sub_name = f'{name}{"-" if name else ""}additional'
|
||||
value_rule = self.visit({} if additional_properties == True else additional_properties, f'{sub_name}-value')
|
||||
value_rule = self.visit(additional_properties, f'{sub_name}-value') if isinstance(additional_properties, dict) else \
|
||||
self._add_primitive('value', PRIMITIVE_RULES['value'])
|
||||
key_rule = self._add_primitive('string', PRIMITIVE_RULES['string']) if not sorted_props \
|
||||
else self._add_rule(f'{sub_name}-k', self._not_strings(sorted_props))
|
||||
|
||||
prop_kv_rule_names["*"] = self._add_rule(
|
||||
f'{sub_name}-kv',
|
||||
self._add_primitive('string', PRIMITIVE_RULES['string']) + f' ":" space {value_rule}'
|
||||
f'{key_rule} ":" space {value_rule}'
|
||||
)
|
||||
optional_props.append("*")
|
||||
|
||||
|
@ -485,15 +715,11 @@ class SchemaConverter:
|
|||
def get_recursive_refs(ks, first_is_optional):
|
||||
[k, *rest] = ks
|
||||
kv_rule_name = prop_kv_rule_names[k]
|
||||
if k == '*':
|
||||
res = self._add_rule(
|
||||
f'{name}{"-" if name else ""}additional-kvs',
|
||||
f'{kv_rule_name} ( "," space ' + kv_rule_name + ' )*'
|
||||
)
|
||||
elif first_is_optional:
|
||||
res = f'( "," space {kv_rule_name} )?'
|
||||
comma_ref = f'( "," space {kv_rule_name} )'
|
||||
if first_is_optional:
|
||||
res = comma_ref + ('*' if k == '*' else '?')
|
||||
else:
|
||||
res = kv_rule_name
|
||||
res = kv_rule_name + (' ' + comma_ref + "*" if k == '*' else '')
|
||||
if len(rest) > 0:
|
||||
res += ' ' + self._add_rule(
|
||||
f'{name}{"-" if name else ""}{k}-rest',
|
||||
|
@ -523,7 +749,7 @@ class SchemaConverter:
|
|||
def main(args_in = None):
|
||||
parser = argparse.ArgumentParser(
|
||||
description='''
|
||||
Generates a grammar (suitable for use in ./main) that produces JSON conforming to a
|
||||
Generates a grammar (suitable for use in ./llama-cli) that produces JSON conforming to a
|
||||
given JSON schema. Only a subset of JSON schema features are supported; more may be
|
||||
added in the future.
|
||||
''',
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# llama.cpp/example/llama-bench
|
||||
# llama.cpp/examples/llama-bench
|
||||
|
||||
Performance testing tool for llama.cpp.
|
||||
|
||||
|
|
|
@ -293,6 +293,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
|
|||
params.output_format = cmd_params_defaults.output_format;
|
||||
params.output_format_stderr = cmd_params_defaults.output_format_stderr;
|
||||
params.reps = cmd_params_defaults.reps;
|
||||
params.numa = cmd_params_defaults.numa;
|
||||
|
||||
for (int i = 1; i < argc; i++) {
|
||||
arg = argv[i];
|
||||
|
@ -713,7 +714,6 @@ struct test {
|
|||
static const bool kompute;
|
||||
static const bool metal;
|
||||
static const bool sycl;
|
||||
static const bool rpc;
|
||||
static const bool gpu_blas;
|
||||
static const bool blas;
|
||||
static const std::string cpu_info;
|
||||
|
@ -725,6 +725,7 @@ struct test {
|
|||
int n_batch;
|
||||
int n_ubatch;
|
||||
int n_threads;
|
||||
bool has_rpc;
|
||||
ggml_type type_k;
|
||||
ggml_type type_v;
|
||||
int n_gpu_layers;
|
||||
|
@ -750,6 +751,7 @@ struct test {
|
|||
n_batch = inst.n_batch;
|
||||
n_ubatch = inst.n_ubatch;
|
||||
n_threads = inst.n_threads;
|
||||
has_rpc = !inst.rpc_servers.empty();
|
||||
type_k = inst.type_k;
|
||||
type_v = inst.type_v;
|
||||
n_gpu_layers = inst.n_gpu_layers;
|
||||
|
@ -809,9 +811,6 @@ struct test {
|
|||
if (sycl) {
|
||||
return GGML_SYCL_NAME;
|
||||
}
|
||||
if (rpc) {
|
||||
return "RPC";
|
||||
}
|
||||
if (gpu_blas) {
|
||||
return "GPU BLAS";
|
||||
}
|
||||
|
@ -881,7 +880,7 @@ struct test {
|
|||
std::vector<std::string> values = {
|
||||
build_commit, std::to_string(build_number),
|
||||
std::to_string(cuda), std::to_string(vulkan), std::to_string(vulkan),
|
||||
std::to_string(metal), std::to_string(sycl), std::to_string(rpc), std::to_string(gpu_blas), std::to_string(blas),
|
||||
std::to_string(metal), std::to_string(sycl), std::to_string(has_rpc), std::to_string(gpu_blas), std::to_string(blas),
|
||||
cpu_info, gpu_info,
|
||||
model_filename, model_type, std::to_string(model_size), std::to_string(model_n_params),
|
||||
std::to_string(n_batch), std::to_string(n_ubatch),
|
||||
|
@ -915,7 +914,6 @@ const bool test::metal = !!ggml_cpu_has_metal();
|
|||
const bool test::gpu_blas = !!ggml_cpu_has_gpublas();
|
||||
const bool test::blas = !!ggml_cpu_has_blas();
|
||||
const bool test::sycl = !!ggml_cpu_has_sycl();
|
||||
const bool test::rpc = !!ggml_cpu_has_rpc();
|
||||
const std::string test::cpu_info = get_cpu_info();
|
||||
const std::string test::gpu_info = get_gpu_info();
|
||||
|
||||
|
@ -1181,6 +1179,9 @@ struct markdown_printer : public printer {
|
|||
value = buf;
|
||||
} else if (field == "backend") {
|
||||
value = test::get_backend();
|
||||
if (t.has_rpc) {
|
||||
value += "+RPC";
|
||||
}
|
||||
} else if (field == "test") {
|
||||
if (t.n_prompt > 0 && t.n_gen == 0) {
|
||||
snprintf(buf, sizeof(buf), "pp%d", t.n_prompt);
|
||||
|
|
|
@ -1,55 +0,0 @@
|
|||
|
||||
# For more information about using CMake with Android Studio, read the
|
||||
# documentation: https://d.android.com/studio/projects/add-native-code.html.
|
||||
# For more examples on how to use CMake, see https://github.com/android/ndk-samples.
|
||||
|
||||
# Sets the minimum CMake version required for this project.
|
||||
cmake_minimum_required(VERSION 3.22.1)
|
||||
|
||||
# Declares the project name. The project name can be accessed via ${ PROJECT_NAME},
|
||||
# Since this is the top level CMakeLists.txt, the project name is also accessible
|
||||
# with ${CMAKE_PROJECT_NAME} (both CMake variables are in-sync within the top level
|
||||
# build script scope).
|
||||
project("llama-android")
|
||||
|
||||
## Fetch latest llama.cpp from GitHub
|
||||
#include(FetchContent)
|
||||
#FetchContent_Declare(
|
||||
# llama
|
||||
# GIT_REPOSITORY https://github.com/ggerganov/llama.cpp
|
||||
# GIT_TAG master
|
||||
#)
|
||||
#
|
||||
## Also provides "common"
|
||||
#FetchContent_MakeAvailable(llama)
|
||||
|
||||
# llama.cpp CI uses the code from the current branch
|
||||
# ref: https://github.com/ggerganov/llama.cpp/pull/7341#issuecomment-2117617700
|
||||
add_subdirectory(../../../../../../ build-llama)
|
||||
|
||||
# Creates and names a library, sets it as either STATIC
|
||||
# or SHARED, and provides the relative paths to its source code.
|
||||
# You can define multiple libraries, and CMake builds them for you.
|
||||
# Gradle automatically packages shared libraries with your APK.
|
||||
#
|
||||
# In this top level CMakeLists.txt, ${CMAKE_PROJECT_NAME} is used to define
|
||||
# the target library name; in the sub-module's CMakeLists.txt, ${PROJECT_NAME}
|
||||
# is preferred for the same purpose.
|
||||
#
|
||||
# In order to load a library into your app from Java/Kotlin, you must call
|
||||
# System.loadLibrary() and pass the name of the library defined here;
|
||||
# for GameActivity/NativeActivity derived applications, the same library name must be
|
||||
# used in the AndroidManifest.xml file.
|
||||
add_library(${CMAKE_PROJECT_NAME} SHARED
|
||||
# List C/C++ source files with relative paths to this CMakeLists.txt.
|
||||
llama-android.cpp)
|
||||
|
||||
# Specifies libraries CMake should link to your target library. You
|
||||
# can link libraries from various origins, such as libraries defined in this
|
||||
# build script, prebuilt third-party libraries, or Android system libraries.
|
||||
target_link_libraries(${CMAKE_PROJECT_NAME}
|
||||
# List libraries link to the target library
|
||||
llama
|
||||
common
|
||||
android
|
||||
log)
|
|
@ -11,15 +11,15 @@ cmake_minimum_required(VERSION 3.22.1)
|
|||
# build script scope).
|
||||
project("llama-android")
|
||||
|
||||
include(FetchContent)
|
||||
FetchContent_Declare(
|
||||
llama
|
||||
GIT_REPOSITORY https://github.com/ggerganov/llama.cpp
|
||||
GIT_TAG master
|
||||
)
|
||||
#include(FetchContent)
|
||||
#FetchContent_Declare(
|
||||
# llama
|
||||
# GIT_REPOSITORY https://github.com/ggerganov/llama.cpp
|
||||
# GIT_TAG master
|
||||
#)
|
||||
|
||||
# Also provides "common"
|
||||
FetchContent_MakeAvailable(llama)
|
||||
#FetchContent_MakeAvailable(llama)
|
||||
|
||||
# Creates and names a library, sets it as either STATIC
|
||||
# or SHARED, and provides the relative paths to its source code.
|
||||
|
@ -30,6 +30,10 @@ FetchContent_MakeAvailable(llama)
|
|||
# the target library name; in the sub-module's CMakeLists.txt, ${PROJECT_NAME}
|
||||
# is preferred for the same purpose.
|
||||
#
|
||||
|
||||
#load local llama.cpp
|
||||
add_subdirectory(../../../../../../ build-llama)
|
||||
|
||||
# In order to load a library into your app from Java/Kotlin, you must call
|
||||
# System.loadLibrary() and pass the name of the library defined here;
|
||||
# for GameActivity/NativeActivity derived applications, the same library name must be
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
#include <string>
|
||||
#include <unistd.h>
|
||||
#include "llama.h"
|
||||
#include "common/common.h"
|
||||
#include "common.h"
|
||||
|
||||
// Write C++ code here.
|
||||
//
|
||||
|
|
|
@ -131,22 +131,29 @@ class LlamaState: ObservableObject {
|
|||
|
||||
messageLog += "\(text)"
|
||||
|
||||
while await llamaContext.n_cur < llamaContext.n_len {
|
||||
let result = await llamaContext.completion_loop()
|
||||
messageLog += "\(result)"
|
||||
Task.detached {
|
||||
while await llamaContext.n_cur < llamaContext.n_len {
|
||||
let result = await llamaContext.completion_loop()
|
||||
await MainActor.run {
|
||||
self.messageLog += "\(result)"
|
||||
}
|
||||
}
|
||||
|
||||
let t_end = DispatchTime.now().uptimeNanoseconds
|
||||
let t_generation = Double(t_end - t_heat_end) / self.NS_PER_S
|
||||
let tokens_per_second = Double(await llamaContext.n_len) / t_generation
|
||||
|
||||
await llamaContext.clear()
|
||||
|
||||
await MainActor.run {
|
||||
self.messageLog += """
|
||||
\n
|
||||
Done
|
||||
Heat up took \(t_heat)s
|
||||
Generated \(tokens_per_second) t/s\n
|
||||
"""
|
||||
}
|
||||
}
|
||||
|
||||
let t_end = DispatchTime.now().uptimeNanoseconds
|
||||
let t_generation = Double(t_end - t_heat_end) / NS_PER_S
|
||||
let tokens_per_second = Double(await llamaContext.n_len) / t_generation
|
||||
|
||||
await llamaContext.clear()
|
||||
messageLog += """
|
||||
\n
|
||||
Done
|
||||
Heat up took \(t_heat)s
|
||||
Generated \(tokens_per_second) t/s\n
|
||||
"""
|
||||
}
|
||||
|
||||
func bench() async {
|
||||
|
|
|
@ -30,8 +30,9 @@ if(TARGET BUILD_INFO)
|
|||
add_dependencies(llava BUILD_INFO)
|
||||
endif()
|
||||
|
||||
set(TARGET llava-cli)
|
||||
add_executable(llava-cli llava-cli.cpp)
|
||||
install(TARGETS llava-cli RUNTIME)
|
||||
target_link_libraries(llava-cli PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_compile_features(llava PRIVATE cxx_std_11)
|
||||
set(TARGET llama-llava-cli)
|
||||
add_executable(${TARGET} llava-cli.cpp)
|
||||
set_target_properties(${TARGET} PROPERTIES OUTPUT_NAME llama-llava-cli)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llava ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
|
|
|
@ -9,12 +9,12 @@ The implementation is based on llava, and is compatible with llava and mobileVLM
|
|||
Notice: The overall model-inference process is the same for both **MobileVLM** and **MobileVLM_V2**, but the model-conversion process differs slightly. Using **MobileVLM-1.7B** as an example, the differing conversion step is shown below.
|
||||
|
||||
## Usage
|
||||
Build with cmake or run `make llava-cli` to build it.
|
||||
Build with cmake or run `make llama-llava-cli` to build it.
|
||||
|
||||
After building, run: `./llava-cli` to see the usage. For example:
|
||||
After building, run: `./llama-llava-cli` to see the usage. For example:
|
||||
|
||||
```sh
|
||||
./llava-cli -m MobileVLM-1.7B/ggml-model-q4_k.gguf \
|
||||
./llama-llava-cli -m MobileVLM-1.7B/ggml-model-q4_k.gguf \
|
||||
--mmproj MobileVLM-1.7B/mmproj-model-f16.gguf \
|
||||
--image path/to/an/image.jpg \
|
||||
-p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWho is the author of this book? Answer the question using a single word or phrase. ASSISTANT:"
|
||||
|
@ -62,7 +62,7 @@ python ./examples/convert-legacy-llama.py path/to/MobileVLM-1.7B
|
|||
|
||||
5. Use `quantize` to convert LLaMA part's DataType from `fp16` to `q4_k`
|
||||
```sh
|
||||
./quantize path/to/MobileVLM-1.7B/ggml-model-f16.gguf path/to/MobileVLM-1.7B/ggml-model-q4_k.gguf q4_k_s
|
||||
./llama-quantize path/to/MobileVLM-1.7B/ggml-model-f16.gguf path/to/MobileVLM-1.7B/ggml-model-q4_k.gguf q4_k_s
|
||||
```
|
||||
|
||||
Now both the LLaMA part and the image encoder are in the `MobileVLM-1.7B` directory.
|
||||
|
@ -82,7 +82,7 @@ refer to `android/adb_run.sh`, modify resources' `name` and `path`
|
|||
### case 1
|
||||
**input**
|
||||
```sh
|
||||
/data/local/tmp/llava-cli \
|
||||
/data/local/tmp/llama-llava-cli \
|
||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||
-t 4 \
|
||||
|
@ -102,7 +102,7 @@ llama_print_timings: total time = 34731.93 ms
|
|||
### case 2
|
||||
**input**
|
||||
```sh
|
||||
/data/local/tmp/llava-cli \
|
||||
/data/local/tmp/llama-llava-cli \
|
||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||
-t 4 \
|
||||
|
@ -126,7 +126,7 @@ llama_print_timings: total time = 34570.79 ms
|
|||
#### llava-cli release-b2005
|
||||
**input**
|
||||
```sh
|
||||
/data/local/tmp/llava-cli \
|
||||
/data/local/tmp/llama-llava-cli \
|
||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||
-t 4 \
|
||||
|
@ -194,13 +194,13 @@ llama_print_timings: total time = 44411.01 ms / 377 tokens
|
|||
## Orin compile and run
|
||||
### compile
|
||||
```sh
|
||||
make LLAMA_CUDA=1 CUDA_DOCKER_ARCH=sm_87 LLAMA_CUDA_F16=1 -j 32
|
||||
make GGML_CUDA=1 CUDA_DOCKER_ARCH=sm_87 GGML_CUDA_F16=1 -j 32
|
||||
```
|
||||
### run on Orin
|
||||
### case 1
|
||||
**input**
|
||||
```sh
|
||||
./llava-cli \
|
||||
./llama-llava-cli \
|
||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||
--image /data/local/tmp/demo.jpeg \
|
||||
|
@ -224,7 +224,7 @@ llama_print_timings: total time = 1352.63 ms / 252 tokens
|
|||
### case 2
|
||||
**input**
|
||||
```sh
|
||||
./llava-cli \
|
||||
./llama-llava-cli \
|
||||
-m /data/local/tmp/ggml-model-q4_k.gguf \
|
||||
--mmproj /data/local/tmp/mmproj-model-f16.gguf \
|
||||
-p "A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat is in the image? ASSISTANT:" \
|
||||
|
|
|
@ -11,12 +11,12 @@ For llava-1.6 a variety of prepared gguf models are available as well [7b-34b](h
|
|||
After API is confirmed, more models will be supported / uploaded.
|
||||
|
||||
## Usage
|
||||
Build with cmake or run `make llava-cli` to build it.
|
||||
Build with cmake or run `make llama-llava-cli` to build it.
|
||||
|
||||
After building, run: `./llava-cli` to see the usage. For example:
|
||||
After building, run: `./llama-llava-cli` to see the usage. For example:
|
||||
|
||||
```sh
|
||||
./llava-cli -m ../llava-v1.5-7b/ggml-model-f16.gguf --mmproj ../llava-v1.5-7b/mmproj-model-f16.gguf --image path/to/an/image.jpg
|
||||
./llama-llava-cli -m ../llava-v1.5-7b/ggml-model-f16.gguf --mmproj ../llava-v1.5-7b/mmproj-model-f16.gguf --image path/to/an/image.jpg
|
||||
```
|
||||
|
||||
**note**: A lower temperature like 0.1 is recommended for better quality. Add `--temp 0.1` to the command to do so.
|
||||
|
@ -95,9 +95,9 @@ python ./examples/llava/convert-image-encoder-to-gguf.py -m vit --llava-projecto
|
|||
python ./examples/convert-legacy-llama.py ../llava-v1.6-vicuna-7b/ --skip-unknown
|
||||
```
|
||||
|
||||
7) And finally we can run the llava-cli using the 1.6 model version:
|
||||
7) And finally we can run the llava cli using the 1.6 model version:
|
||||
```console
|
||||
./llava-cli -m ../llava-v1.6-vicuna-7b/ggml-model-f16.gguf --mmproj vit/mmproj-model-f16.gguf --image some-image.jpg -c 4096
|
||||
./llama-llava-cli -m ../llava-v1.6-vicuna-7b/ggml-model-f16.gguf --mmproj vit/mmproj-model-f16.gguf --image some-image.jpg -c 4096
|
||||
```
|
||||
|
||||
**note**: llava-1.6 needs more context than llava-1.5; at least 3000 tokens are needed (just run it at `-c 4096`).
|
||||
|
|
|
@ -10,7 +10,7 @@ prompt="A chat between a curious user and an artificial intelligence assistant.
|
|||
# prompt="A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user's questions. USER: <image>\nWhat is in the image? ASSISTANT:"
|
||||
|
||||
program_dir="build_64/bin"
|
||||
binName="llava-cli"
|
||||
binName="llama-llava-cli"
|
||||
n_threads=4
|
||||
|
||||
|
||||
|
|
|
@ -1121,20 +1121,20 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||
}
|
||||
if (n < 32)
|
||||
hparams.image_grid_pinpoints[n] = 0;
|
||||
} catch (std::runtime_error & e) {
|
||||
} catch (std::runtime_error & /*e*/) {
|
||||
hparams.image_grid_pinpoints[0]=0;
|
||||
}
|
||||
|
||||
try {
|
||||
int idx = get_key_idx(ctx, KEY_MM_PATCH_MERGE_TYPE);
|
||||
strcpy(hparams.mm_patch_merge_type, gguf_get_val_str(ctx, idx));
|
||||
} catch (std::runtime_error & e) {
|
||||
} catch (std::runtime_error & /*e*/) {
|
||||
strcpy(hparams.mm_patch_merge_type, "flat");
|
||||
}
|
||||
|
||||
try {
|
||||
hparams.image_crop_resolution = get_u32(ctx, KEY_IMAGE_CROP_RESOLUTION); // llava-1.6
|
||||
} catch(const std::exception& e) {
|
||||
} catch(const std::exception& /*e*/) {
|
||||
hparams.image_crop_resolution = hparams.image_size;
|
||||
}
|
||||
|
||||
|
@ -1173,7 +1173,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||
try {
|
||||
vision_model.class_embedding = get_tensor(new_clip->ctx_data, TN_CLASS_EMBD);
|
||||
new_clip->has_class_embedding = true;
|
||||
} catch (const std::exception& e) {
|
||||
} catch (const std::exception& /*e*/) {
|
||||
new_clip->has_class_embedding = false;
|
||||
}
|
||||
|
||||
|
@ -1181,7 +1181,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||
vision_model.pre_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "weight"));
|
||||
vision_model.pre_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_PRE, "v", "bias"));
|
||||
new_clip->has_pre_norm = true;
|
||||
} catch (std::exception & e) {
|
||||
} catch (std::exception & /*e*/) {
|
||||
new_clip->has_pre_norm = false;
|
||||
}
|
||||
|
||||
|
@ -1189,21 +1189,21 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||
vision_model.post_ln_w = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "weight"));
|
||||
vision_model.post_ln_b = get_tensor(new_clip->ctx_data, format(TN_LN_POST, "v", "bias"));
|
||||
new_clip->has_post_norm = true;
|
||||
} catch (std::exception & e) {
|
||||
} catch (std::exception & /*e*/) {
|
||||
new_clip->has_post_norm = false;
|
||||
}
|
||||
|
||||
try {
|
||||
vision_model.patch_bias = get_tensor(new_clip->ctx_data, TN_PATCH_BIAS);
|
||||
new_clip->has_patch_bias = true;
|
||||
} catch (std::exception & e) {
|
||||
} catch (std::exception & /*e*/) {
|
||||
new_clip->has_patch_bias = false;
|
||||
}
|
||||
|
||||
try {
|
||||
vision_model.patch_embeddings = get_tensor(new_clip->ctx_data, TN_PATCH_EMBD);
|
||||
vision_model.position_embeddings = get_tensor(new_clip->ctx_data, format(TN_POS_EMBD, "v"));
|
||||
} catch(const std::exception& e) {
|
||||
} catch(const std::exception& /*e*/) {
|
||||
LOG_TEE("%s: failed to load vision model tensors\n", __func__);
|
||||
}
|
||||
|
||||
|
@ -1215,26 +1215,26 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
|
|||
// Yi-type llava
|
||||
vision_model.mm_1_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "weight"));
|
||||
vision_model.mm_1_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 1, "bias"));
|
||||
} catch (std::runtime_error & e) { }
|
||||
} catch (std::runtime_error & /*e*/) { }
|
||||
try {
|
||||
// missing in Yi-type llava
|
||||
vision_model.mm_2_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "weight"));
|
||||
vision_model.mm_2_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 2, "bias"));
|
||||
} catch (std::runtime_error & e) { }
|
||||
} catch (std::runtime_error & /*e*/) { }
|
||||
try {
|
||||
// Yi-type llava
|
||||
vision_model.mm_3_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "weight"));
|
||||
vision_model.mm_3_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 3, "bias"));
|
||||
} catch (std::runtime_error & e) { }
|
||||
} catch (std::runtime_error & /*e*/) { }
|
||||
try {
|
||||
// Yi-type llava
|
||||
vision_model.mm_4_w = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "weight"));
|
||||
vision_model.mm_4_b = get_tensor(new_clip->ctx_data, format(TN_LLAVA_PROJ, 4, "bias"));
|
||||
} catch (std::runtime_error & e) { }
|
||||
} catch (std::runtime_error & /*e*/) { }
|
||||
try {
|
||||
vision_model.image_newline = get_tensor(new_clip->ctx_data, TN_IMAGE_NEWLINE);
|
||||
// LOG_TEE("%s: image_newline tensor (llava-1.6) found\n", __func__);
|
||||
} catch (std::runtime_error & e) { }
|
||||
} catch (std::runtime_error & /*e*/) { }
|
||||
} else if (new_clip->proj_type == PROJECTOR_TYPE_LDP) {
|
||||
// MobileVLM projection
|
||||
vision_model.mm_model_mlp_1_w = get_tensor(new_clip->ctx_data, format(TN_MVLM_PROJ_MLP, 1, "weight"));
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET lookahead)
|
||||
set(TARGET llama-lookahead)
|
||||
add_executable(${TARGET} lookahead.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -1,22 +1,22 @@
|
|||
set(TARGET lookup)
|
||||
set(TARGET llama-lookup)
|
||||
add_executable(${TARGET} lookup.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
|
||||
set(TARGET lookup-create)
|
||||
set(TARGET llama-lookup-create)
|
||||
add_executable(${TARGET} lookup-create.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
|
||||
set(TARGET lookup-merge)
|
||||
set(TARGET llama-lookup-merge)
|
||||
add_executable(${TARGET} lookup-merge.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
|
||||
set(TARGET lookup-stats)
|
||||
set(TARGET llama-lookup-stats)
|
||||
add_executable(${TARGET} lookup-stats.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -11,14 +11,14 @@
|
|||
#include <unordered_map>
|
||||
#include <vector>
|
||||
|
||||
static void print_usage() {
|
||||
static void print_usage(char* argv0) {
|
||||
fprintf(stderr, "Merges multiple lookup cache files into a single one.\n");
|
||||
fprintf(stderr, "Usage: lookup-merge [--help] lookup_part_1.bin lookup_part_2.bin ... lookup_merged.bin\n");
|
||||
fprintf(stderr, "Usage: %s [--help] lookup_part_1.bin lookup_part_2.bin ... lookup_merged.bin\n", argv0);
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv){
|
||||
if (argc < 3) {
|
||||
print_usage();
|
||||
print_usage(argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
|
@ -27,7 +27,7 @@ int main(int argc, char ** argv){
|
|||
for (int i = 0; i < argc-1; ++i) {
|
||||
args[i] = argv[i+1];
|
||||
if (args[i] == "-h" || args[i] == "--help") {
|
||||
print_usage();
|
||||
print_usage(argv[0]);
|
||||
exit(0);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,12 +1,12 @@
|
|||
cmake_minimum_required(VERSION 3.12)
|
||||
project("main-cmake-pkg" C CXX)
|
||||
set(TARGET main-cmake-pkg)
|
||||
project("llama-cli-cmake-pkg" C CXX)
|
||||
set(TARGET llama-cli-cmake-pkg)
|
||||
|
||||
find_package(Llama 0.0.1 REQUIRED)
|
||||
|
||||
# Bake common functionality in with target. Because applications
|
||||
# using the relocatable Llama package should be outside of the
|
||||
# source tree, main-cmake-pkg pretends the dependencies are built-in.
|
||||
# source tree, llama-cli-cmake-pkg pretends the dependencies are built-in.
|
||||
set(_common_path "${CMAKE_CURRENT_LIST_DIR}/../../common")
|
||||
add_library(common OBJECT)
|
||||
file(GLOB _common_files
|
||||
|
@ -15,7 +15,7 @@ file(GLOB _common_files
|
|||
)
|
||||
target_sources(common PRIVATE ${_common_files})
|
||||
|
||||
# If the common project was part of "main-cmake-pkg" the transient
|
||||
# If the common project was part of "llama-cli-cmake-pkg" the transient
|
||||
# defines would automatically be attached. Because the common func-
|
||||
# tionality is separate, but dependent upon the defines, it must be
|
||||
# explicitly extracted from the "llama" target.
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
# llama.cpp/example/main-cmake-pkg
|
||||
|
||||
This program builds the [main](../main) application using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree.
|
||||
This program builds [llama-cli](../main) using a relocatable CMake package. It serves as an example of using the `find_package()` CMake command to conveniently include [llama.cpp](https://github.com/ggerganov/llama.cpp) in projects which live outside of the source tree.
|
||||
|
||||
## Building
|
||||
|
||||
|
@ -20,7 +20,7 @@ cmake --build build --config Release
|
|||
cmake --install build --prefix C:/LlamaCPP
|
||||
```
|
||||
|
||||
### Build main-cmake-pkg
|
||||
### Build llama-cli-cmake-pkg
|
||||
|
||||
|
||||
```cmd
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET main)
|
||||
set(TARGET llama-cli)
|
||||
add_executable(${TARGET} main.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# llama.cpp/example/main
|
||||
# llama.cpp/examples/main
|
||||
|
||||
This example program allows you to use various LLaMA language models in an easy and efficient way. It is specifically designed to work with the [llama.cpp](https://github.com/ggerganov/llama.cpp) project, which provides a plain C/C++ implementation with optional 4-bit quantization support for faster, lower memory inference, and is optimized for desktop CPUs. This program can be used to perform various inference tasks with LLaMA models, including generating text based on user-provided prompts and chat-like interactions with reverse prompts.
|
||||
|
||||
|
@ -20,13 +20,13 @@ To get started right away, run the following command, making sure to use the cor
|
|||
#### Unix-based systems (Linux, macOS, etc.):
|
||||
|
||||
```bash
|
||||
./main -m models/7B/ggml-model.bin --prompt "Once upon a time"
|
||||
./llama-cli -m models/7B/ggml-model.bin --prompt "Once upon a time"
|
||||
```
|
||||
|
||||
#### Windows:
|
||||
|
||||
```powershell
|
||||
main.exe -m models\7B\ggml-model.bin --prompt "Once upon a time"
|
||||
llama-cli.exe -m models\7B\ggml-model.bin --prompt "Once upon a time"
|
||||
```
|
||||
|
||||
For an interactive experience, try this command:
|
||||
|
@ -34,7 +34,7 @@ For an interactive experience, try this command:
|
|||
#### Unix-based systems (Linux, macOS, etc.):
|
||||
|
||||
```bash
|
||||
./main -m models/7B/ggml-model.bin -n -1 --color -r "User:" --in-prefix " " -i -p \
|
||||
./llama-cli -m models/7B/ggml-model.bin -n -1 --color -r "User:" --in-prefix " " -i -p \
|
||||
'User: Hi
|
||||
AI: Hello. I am an AI chatbot. Would you like to talk?
|
||||
User: Sure!
|
||||
|
@ -45,7 +45,7 @@ User:'
|
|||
#### Windows:
|
||||
|
||||
```powershell
|
||||
main.exe -m models\7B\ggml-model.bin -n -1 --color -r "User:" --in-prefix " " -i -e -p "User: Hi\nAI: Hello. I am an AI chatbot. Would you like to talk?\nUser: Sure!\nAI: What would you like to talk about?\nUser:"
|
||||
llama-cli.exe -m models\7B\ggml-model.bin -n -1 --color -r "User:" --in-prefix " " -i -e -p "User: Hi\nAI: Hello. I am an AI chatbot. Would you like to talk?\nUser: Sure!\nAI: What would you like to talk about?\nUser:"
|
||||
```
|
||||
|
||||
The following command generates "infinite" text from a starting prompt (you can use `Ctrl-C` to stop it):
|
||||
|
@ -53,18 +53,18 @@ The following command generates "infinite" text from a starting prompt (you can
|
|||
#### Unix-based systems (Linux, macOS, etc.):
|
||||
|
||||
```bash
|
||||
./main -m models/7B/ggml-model.bin --ignore-eos -n -1
|
||||
./llama-cli -m models/7B/ggml-model.bin --ignore-eos -n -1
|
||||
```
|
||||
|
||||
#### Windows:
|
||||
|
||||
```powershell
|
||||
main.exe -m models\7B\ggml-model.bin --ignore-eos -n -1
|
||||
llama-cli.exe -m models\7B\ggml-model.bin --ignore-eos -n -1
|
||||
```
|
||||
|
||||
## Common Options
|
||||
|
||||
In this section, we cover the most commonly used options for running the `main` program with the LLaMA models:
|
||||
In this section, we cover the most commonly used options for running the `llama-cli` program with the LLaMA models:
|
||||
|
||||
- `-m FNAME, --model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.gguf`; inferred from `--model-url` if set).
|
||||
- `-mu MODEL_URL, --model-url MODEL_URL`: Specify a remote HTTP URL from which to download the file (e.g., https://huggingface.co/ggml-org/models/resolve/main/phi-2/ggml-model-q4_0.gguf).
|
||||
|
@ -74,7 +74,7 @@ In this section, we cover the most commonly used options for running the `main`
|
|||
|
||||
## Input Prompts
|
||||
|
||||
The `main` program provides several ways to interact with the LLaMA models using input prompts:
|
||||
The `llama-cli` program provides several ways to interact with the LLaMA models using input prompts:
|
||||
|
||||
- `--prompt PROMPT`: Provide a prompt directly as a command-line option.
|
||||
- `--file FNAME`: Provide a file containing a prompt or multiple prompts.
|
||||
|
@ -82,7 +82,7 @@ The `main` program provides several ways to interact with the LLaMA models using
|
|||
|
||||
## Interaction
|
||||
|
||||
The `main` program offers a seamless way to interact with LLaMA models, allowing users to engage in real-time conversations or provide instructions for specific tasks. The interactive mode can be triggered using various options, including `--interactive` and `--interactive-first`.
|
||||
The `llama-cli` program offers a seamless way to interact with LLaMA models, allowing users to engage in real-time conversations or provide instructions for specific tasks. The interactive mode can be triggered using various options, including `--interactive` and `--interactive-first`.
|
||||
|
||||
In interactive mode, users can participate in text generation by injecting their input during the process. Users can press `Ctrl+C` at any time to interject and type their input, followed by pressing `Return` to submit it to the LLaMA model. To submit additional lines without finalizing input, users can end the current line with a backslash (`\`) and continue typing.
|
||||
|
||||
|
@ -107,7 +107,7 @@ To overcome this limitation, you can use the `--in-prefix` flag to add a space o
|
|||
The `--in-prefix` flag is used to add a prefix to your input, primarily, this is used to insert a space after the reverse prompt. Here's an example of how to use the `--in-prefix` flag in conjunction with the `--reverse-prompt` flag:
|
||||
|
||||
```sh
|
||||
./main -r "User:" --in-prefix " "
|
||||
./llama-cli -r "User:" --in-prefix " "
|
||||
```
|
||||
|
||||
### In-Suffix
|
||||
|
@ -115,7 +115,7 @@ The `--in-prefix` flag is used to add a prefix to your input, primarily, this is
|
|||
The `--in-suffix` flag is used to add a suffix after your input. This is useful for adding an "Assistant:" prompt after the user's input. It's added after the new-line character (`\n`) that's automatically added to the end of the user's input. Here's an example of how to use the `--in-suffix` flag in conjunction with the `--reverse-prompt` flag:
|
||||
|
||||
```sh
|
||||
./main -r "User:" --in-prefix " " --in-suffix "Assistant:"
|
||||
./llama-cli -r "User:" --in-prefix " " --in-suffix "Assistant:"
|
||||
```
|
||||
|
||||
## Context Management
|
||||
|
|
|
@ -39,12 +39,12 @@ static std::ostringstream * g_output_ss;
|
|||
static std::vector<llama_token> * g_output_tokens;
|
||||
static bool is_interacting = false;
|
||||
|
||||
static bool file_exists(const std::string &path) {
|
||||
static bool file_exists(const std::string & path) {
|
||||
std::ifstream f(path.c_str());
|
||||
return f.good();
|
||||
}
|
||||
|
||||
static bool file_is_empty(const std::string &path) {
|
||||
static bool file_is_empty(const std::string & path) {
|
||||
std::ifstream f;
|
||||
f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
|
||||
f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
|
||||
|
@ -117,6 +117,14 @@ static void llama_log_callback_logTee(ggml_log_level level, const char * text, v
|
|||
LOG_TEE("%s", text);
|
||||
}
|
||||
|
||||
static std::string chat_add_and_format(struct llama_model * model, std::vector<llama_chat_msg> & chat_msgs, std::string role, std::string content) {
|
||||
llama_chat_msg new_msg{role, content};
|
||||
auto formatted = llama_chat_format_single(
|
||||
model, g_params->chat_template, chat_msgs, new_msg, role == "user");
|
||||
chat_msgs.push_back({role, content});
|
||||
return formatted;
|
||||
}
|
||||
|
||||
int main(int argc, char ** argv) {
|
||||
gpt_params params;
|
||||
g_params = ¶ms;
|
||||
|
@ -190,6 +198,7 @@ int main(int argc, char ** argv) {
|
|||
llama_model * model;
|
||||
llama_context * ctx;
|
||||
llama_context * ctx_guidance = NULL;
|
||||
std::vector<llama_chat_msg> chat_msgs;
|
||||
g_model = &model;
|
||||
g_ctx = &ctx;
|
||||
|
||||
|
@ -215,6 +224,8 @@ int main(int argc, char ** argv) {
|
|||
__func__, n_ctx_train, n_ctx);
|
||||
}
|
||||
|
||||
LOG_TEE("%s: chat template example: %s\n", __func__, llama_chat_format_example(model, params.chat_template).c_str());
|
||||
|
||||
// print system information
|
||||
{
|
||||
LOG_TEE("\n");
|
||||
|
@ -249,16 +260,21 @@ int main(int argc, char ** argv) {
|
|||
|
||||
std::vector<llama_token> embd_inp;
|
||||
|
||||
if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
|
||||
LOG("tokenize the prompt\n");
|
||||
embd_inp = ::llama_tokenize(ctx, params.prompt, true, true);
|
||||
} else {
|
||||
LOG("use session tokens\n");
|
||||
embd_inp = session_tokens;
|
||||
}
|
||||
{
|
||||
auto prompt = (params.conversation && params.enable_chat_template)
|
||||
? chat_add_and_format(model, chat_msgs, "system", params.prompt) // format the system prompt in conversation mode
|
||||
: params.prompt;
|
||||
if (params.interactive_first || !params.prompt.empty() || session_tokens.empty()) {
|
||||
LOG("tokenize the prompt\n");
|
||||
embd_inp = ::llama_tokenize(ctx, prompt, true, true);
|
||||
} else {
|
||||
LOG("use session tokens\n");
|
||||
embd_inp = session_tokens;
|
||||
}
|
||||
|
||||
LOG("prompt: \"%s\"\n", log_tostr(params.prompt));
|
||||
LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
|
||||
LOG("prompt: \"%s\"\n", log_tostr(prompt));
|
||||
LOG("tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str());
|
||||
}
|
||||
|
||||
// Should not run without any tokens
|
||||
if (embd_inp.empty()) {
|
||||
|
@ -481,6 +497,7 @@ int main(int argc, char ** argv) {
|
|||
std::vector<int> input_tokens; g_input_tokens = &input_tokens;
|
||||
std::vector<int> output_tokens; g_output_tokens = &output_tokens;
|
||||
std::ostringstream output_ss; g_output_ss = &output_ss;
|
||||
std::ostringstream assistant_ss; // for storing current assistant message, used in conversation mode
|
||||
|
||||
// the first thing we will do is to output the prompt, so set color accordingly
|
||||
console::set_display(console::prompt);
|
||||
|
@ -798,11 +815,20 @@ int main(int argc, char ** argv) {
|
|||
is_antiprompt = true;
|
||||
}
|
||||
|
||||
if (params.enable_chat_template) {
|
||||
chat_add_and_format(model, chat_msgs, "assistant", assistant_ss.str());
|
||||
}
|
||||
is_interacting = true;
|
||||
printf("\n");
|
||||
}
|
||||
}
|
||||
|
||||
// if current token is not EOG, we add it to current assistant message
|
||||
if (params.conversation) {
|
||||
auto id = llama_sampling_last(ctx_sampling);
|
||||
assistant_ss << llama_token_to_piece(ctx, id, false);
|
||||
}
|
||||
|
||||
if (n_past > 0 && is_interacting) {
|
||||
LOG("waiting for user input\n");
|
||||
|
||||
|
@ -853,8 +879,13 @@ int main(int argc, char ** argv) {
|
|||
string_process_escapes(buffer);
|
||||
}
|
||||
|
||||
bool format_chat = params.conversation && params.enable_chat_template;
|
||||
std::string user_inp = format_chat
|
||||
? chat_add_and_format(model, chat_msgs, "user", std::move(buffer))
|
||||
: std::move(buffer);
|
||||
// TODO: one inconvenient of current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
|
||||
const auto line_pfx = ::llama_tokenize(ctx, params.input_prefix, false, true);
|
||||
const auto line_inp = ::llama_tokenize(ctx, buffer, false, false);
|
||||
const auto line_inp = ::llama_tokenize(ctx, user_inp, false, format_chat);
|
||||
const auto line_sfx = ::llama_tokenize(ctx, params.input_suffix, false, true);
|
||||
|
||||
LOG("input tokens: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, line_inp).c_str());
|
||||
|
@ -869,6 +900,9 @@ int main(int argc, char ** argv) {
|
|||
output_ss << llama_token_to_piece(ctx, token);
|
||||
}
|
||||
|
||||
// reset assistant message
|
||||
assistant_ss.str("");
|
||||
|
||||
n_remain -= line_inp.size();
|
||||
LOG("n_remain: %d\n", n_remain);
|
||||
} else {
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET parallel)
|
||||
set(TARGET llama-parallel)
|
||||
add_executable(${TARGET} parallel.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET passkey)
|
||||
set(TARGET llama-passkey)
|
||||
add_executable(${TARGET} passkey.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -8,5 +8,5 @@ See the following PRs for more info:
|
|||
### Usage
|
||||
|
||||
```bash
|
||||
make -j && ./passkey -m ./models/llama-7b-v2/ggml-model-f16.gguf --junk 250
|
||||
make -j && ./llama-passkey -m ./models/llama-7b-v2/ggml-model-f16.gguf --junk 250
|
||||
```
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET perplexity)
|
||||
set(TARGET llama-perplexity)
|
||||
add_executable(${TARGET} perplexity.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -476,7 +476,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
|
|||
}
|
||||
|
||||
// Download: https://huggingface.co/datasets/ggml-org/ci/resolve/main/wikitext-2-raw-v1.zip
|
||||
// Run `./perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
|
||||
// Run `./llama-perplexity -m models/7B/ggml-model-q4_0.bin -f wiki.test.raw`
|
||||
// Output: `perplexity: 13.5106 [114/114]`
|
||||
// BOS tokens will be added for each chunk before eval
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET quantize-stats)
|
||||
set(TARGET llama-quantize-stats)
|
||||
add_executable(${TARGET} quantize-stats.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE llama build_info ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET quantize)
|
||||
set(TARGET llama-quantize)
|
||||
add_executable(${TARGET} quantize.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE llama common ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -16,41 +16,41 @@ struct quant_option {
|
|||
};
|
||||
|
||||
static const std::vector<struct quant_option> QUANT_OPTIONS = {
|
||||
{ "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 3.56G, +0.2166 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 3.90G, +0.1585 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 4.33G, +0.0683 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 4.70G, +0.0349 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q4_0", LLAMA_FTYPE_MOSTLY_Q4_0, " 4.34G, +0.4685 ppl @ Llama-3-8B", },
|
||||
{ "Q4_1", LLAMA_FTYPE_MOSTLY_Q4_1, " 4.78G, +0.4511 ppl @ Llama-3-8B", },
|
||||
{ "Q5_0", LLAMA_FTYPE_MOSTLY_Q5_0, " 5.21G, +0.1316 ppl @ Llama-3-8B", },
|
||||
{ "Q5_1", LLAMA_FTYPE_MOSTLY_Q5_1, " 5.65G, +0.1062 ppl @ Llama-3-8B", },
|
||||
{ "IQ2_XXS",LLAMA_FTYPE_MOSTLY_IQ2_XXS," 2.06 bpw quantization", },
|
||||
{ "IQ2_XS", LLAMA_FTYPE_MOSTLY_IQ2_XS, " 2.31 bpw quantization", },
|
||||
{ "IQ2_S", LLAMA_FTYPE_MOSTLY_IQ2_S, " 2.5 bpw quantization", },
|
||||
{ "IQ2_M", LLAMA_FTYPE_MOSTLY_IQ2_M, " 2.7 bpw quantization", },
|
||||
{ "IQ1_S", LLAMA_FTYPE_MOSTLY_IQ1_S, " 1.56 bpw quantization", },
|
||||
{ "IQ1_M", LLAMA_FTYPE_MOSTLY_IQ1_M, " 1.75 bpw quantization", },
|
||||
{ "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.63G, +0.6717 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q2_K_S", LLAMA_FTYPE_MOSTLY_Q2_K_S, " 2.16G, +9.0634 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q2_K", LLAMA_FTYPE_MOSTLY_Q2_K, " 2.96G, +3.5199 ppl @ Llama-3-8B", },
|
||||
{ "Q2_K_S", LLAMA_FTYPE_MOSTLY_Q2_K_S, " 2.96G, +3.1836 ppl @ Llama-3-8B", },
|
||||
{ "IQ3_XXS",LLAMA_FTYPE_MOSTLY_IQ3_XXS," 3.06 bpw quantization", },
|
||||
{ "IQ3_S", LLAMA_FTYPE_MOSTLY_IQ3_S, " 3.44 bpw quantization", },
|
||||
{ "IQ3_M", LLAMA_FTYPE_MOSTLY_IQ3_M, " 3.66 bpw quantization mix", },
|
||||
{ "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" },
|
||||
{ "IQ3_XS", LLAMA_FTYPE_MOSTLY_IQ3_XS, " 3.3 bpw quantization" , },
|
||||
{ "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 2.75G, +0.5551 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.07G, +0.2496 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 3.35G, +0.1764 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" },
|
||||
{ "IQ3_XS", LLAMA_FTYPE_MOSTLY_IQ3_XS, " 3.3 bpw quantization", },
|
||||
{ "Q3_K_S", LLAMA_FTYPE_MOSTLY_Q3_K_S, " 3.41G, +1.6321 ppl @ Llama-3-8B", },
|
||||
{ "Q3_K_M", LLAMA_FTYPE_MOSTLY_Q3_K_M, " 3.74G, +0.6569 ppl @ Llama-3-8B", },
|
||||
{ "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 4.03G, +0.5562 ppl @ Llama-3-8B", },
|
||||
{ "IQ4_NL", LLAMA_FTYPE_MOSTLY_IQ4_NL, " 4.50 bpw non-linear quantization", },
|
||||
{ "IQ4_XS", LLAMA_FTYPE_MOSTLY_IQ4_XS, " 4.25 bpw non-linear quantization", },
|
||||
{ "Q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", },
|
||||
{ "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 3.59G, +0.0992 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 3.80G, +0.0532 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M, "alias for Q5_K_M", },
|
||||
{ "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 4.33G, +0.0400 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 4.45G, +0.0122 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 5.15G, +0.0008 ppl @ LLaMA-v1-7B", },
|
||||
{ "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 6.70G, +0.0004 ppl @ LLaMA-v1-7B", },
|
||||
{ "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, -0.0020 ppl @ Mistral-7B", },
|
||||
{ "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
|
||||
{ "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
|
||||
{ "Q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", },
|
||||
{ "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 4.37G, +0.2689 ppl @ Llama-3-8B", },
|
||||
{ "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 4.58G, +0.1754 ppl @ Llama-3-8B", },
|
||||
{ "Q5_K", LLAMA_FTYPE_MOSTLY_Q5_K_M, "alias for Q5_K_M", },
|
||||
{ "Q5_K_S", LLAMA_FTYPE_MOSTLY_Q5_K_S, " 5.21G, +0.1049 ppl @ Llama-3-8B", },
|
||||
{ "Q5_K_M", LLAMA_FTYPE_MOSTLY_Q5_K_M, " 5.33G, +0.0569 ppl @ Llama-3-8B", },
|
||||
{ "Q6_K", LLAMA_FTYPE_MOSTLY_Q6_K, " 6.14G, +0.0217 ppl @ Llama-3-8B", },
|
||||
{ "Q8_0", LLAMA_FTYPE_MOSTLY_Q8_0, " 7.96G, +0.0026 ppl @ Llama-3-8B", },
|
||||
{ "F16", LLAMA_FTYPE_MOSTLY_F16, "14.00G, +0.0020 ppl @ Mistral-7B", },
|
||||
{ "BF16", LLAMA_FTYPE_MOSTLY_BF16, "14.00G, -0.0050 ppl @ Mistral-7B", },
|
||||
{ "F32", LLAMA_FTYPE_ALL_F32, "26.00G @ 7B", },
|
||||
// Note: Ensure COPY comes after F32 to avoid ftype 0 from matching.
|
||||
{ "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
|
||||
{ "COPY", LLAMA_FTYPE_ALL_F32, "only copy tensors, no quantizing", },
|
||||
};
|
||||
|
||||
static const char * const LLM_KV_QUANTIZE_IMATRIX_FILE = "quantize.imatrix.file";
|
||||
|
|
|
@ -18,9 +18,9 @@ fi
|
|||
|
||||
set -x
|
||||
|
||||
SPLIT=$1/gguf-split
|
||||
QUANTIZE=$1/quantize
|
||||
MAIN=$1/main
|
||||
SPLIT=$1/llama-gguf-split
|
||||
QUANTIZE=$1/llama-quantize
|
||||
MAIN=$1/llama-cli
|
||||
WORK_PATH=$TMP_DIR/quantize
|
||||
ROOT_DIR=$(realpath $(dirname $0)/../../)
|
||||
|
||||
|
|
|
@ -8,7 +8,7 @@ if [ "$1" == "-m" ]; then
|
|||
MODEL="-m $2 "
|
||||
fi
|
||||
|
||||
./main $MODEL --color \
|
||||
./llama-cli $MODEL --color \
|
||||
-f ./prompts/reason-act.txt \
|
||||
-i --interactive-first \
|
||||
--top_k 10000 --temp 0.2 --repeat_penalty 1 -t 7 -c 2048 \
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET retrieval)
|
||||
set(TARGET llama-retrieval)
|
||||
add_executable(${TARGET} retrieval.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -15,7 +15,7 @@ https://github.com/ggerganov/llama.cpp/pull/6193
|
|||
The `retrieval` example can be tested as follows:
|
||||
|
||||
```bash
|
||||
make -j && ./retrieval --model ./models/bge-base-en-v1.5-f16.gguf --top-k 3 --context-file README.md --context-file License --chunk-size 100 --chunk-separator .
|
||||
make -j && ./llama-retrieval --model ./models/bge-base-en-v1.5-f16.gguf --top-k 3 --context-file README.md --context-file License --chunk-size 100 --chunk-separator .
|
||||
```
|
||||
|
||||
This chunks and embeds all given files and starts a loop requesting query inputs:
|
||||
|
|
|
@ -73,9 +73,10 @@ static std::vector<chunk> chunk_file(const std::string & filename, int chunk_siz
|
|||
return chunks;
|
||||
}
|
||||
|
||||
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, int seq_id) {
|
||||
for (size_t i = 0; i < tokens.size(); i++) {
|
||||
llama_batch_add(batch, tokens[i], i, { seq_id }, i == tokens.size() - 1);
|
||||
static void batch_add_seq(llama_batch & batch, const std::vector<int32_t> & tokens, llama_seq_id seq_id) {
|
||||
size_t n_tokens = tokens.size();
|
||||
for (size_t i = 0; i < n_tokens; i++) {
|
||||
llama_batch_add(batch, tokens[i], i, { seq_id }, true);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -160,6 +161,12 @@ int main(int argc, char ** argv) {
|
|||
const int n_ctx_train = llama_n_ctx_train(model);
|
||||
const int n_ctx = llama_n_ctx(ctx);
|
||||
|
||||
const enum llama_pooling_type pooling_type = llama_pooling_type(ctx);
|
||||
if (pooling_type == LLAMA_POOLING_TYPE_NONE) {
|
||||
fprintf(stderr, "%s: error: pooling type NONE not supported\n", __func__);
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (n_ctx > n_ctx_train) {
|
||||
fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
|
||||
__func__, n_ctx_train, n_ctx);
|
||||
|
|
|
@ -29,13 +29,13 @@ You can also run multiple `rpc-server` instances on the same host, each with a d
|
|||
|
||||
## Usage
|
||||
|
||||
On each host, build the corresponding backend with `cmake` and add `-DLLAMA_RPC=ON` to the build options.
|
||||
On each host, build the corresponding backend with `cmake` and add `-DGGML_RPC=ON` to the build options.
|
||||
For example, to build the CUDA backend with RPC support:
|
||||
|
||||
```bash
|
||||
mkdir build-rpc-cuda
|
||||
cd build-rpc-cuda
|
||||
cmake .. -DLLAMA_CUDA=ON -DLLAMA_RPC=ON
|
||||
cmake .. -DGGML_CUDA=ON -DGGML_RPC=ON
|
||||
cmake --build . --config Release
|
||||
```
|
||||
|
||||
|
@ -58,17 +58,17 @@ $ CUDA_VISIBLE_DEVICES=0 bin/rpc-server -p 50052
|
|||
This way you can run multiple `rpc-server` instances on the same host, each with a different CUDA device.
|
||||
|
||||
|
||||
On the main host build `llama.cpp` only with `-DLLAMA_RPC=ON`:
|
||||
On the main host build `llama.cpp` only with `-DGGML_RPC=ON`:
|
||||
|
||||
```bash
|
||||
mkdir build-rpc
|
||||
cd build-rpc
|
||||
cmake .. -DLLAMA_RPC=ON
|
||||
cmake .. -DGGML_RPC=ON
|
||||
cmake --build . --config Release
|
||||
```
|
||||
|
||||
Finally, use the `--rpc` option to specify the host and port of each `rpc-server`:
|
||||
|
||||
```bash
|
||||
$ bin/main -m ../models/tinyllama-1b/ggml-model-f16.gguf -p "Hello, my name is" --repeat-penalty 1.0 -n 64 --rpc 192.168.88.10:50052,192.168.88.11:50052 -ngl 99
|
||||
$ bin/llama-cli -m ../models/tinyllama-1b/ggml-model-f16.gguf -p "Hello, my name is" --repeat-penalty 1.0 -n 64 --rpc 192.168.88.10:50052,192.168.88.11:50052 -ngl 99
|
||||
```
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
set(TARGET save-load-state)
|
||||
set(TARGET llama-save-load-state)
|
||||
add_executable(${TARGET} save-load-state.cpp)
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
|
|
@ -16,7 +16,7 @@ GEN_OPTIONS="${GEN_OPTIONS:---ctx_size 4096 --batch-size 1024}"
|
|||
|
||||
|
||||
# shellcheck disable=SC2086 # Intended splitting of GEN_OPTIONS
|
||||
./server $GEN_OPTIONS \
|
||||
./llama-server $GEN_OPTIONS \
|
||||
--model "$MODEL" \
|
||||
--threads "$N_THREAD" \
|
||||
--rope-freq-scale 1.0 \
|
||||
|
|
|
@ -1,7 +1,14 @@
|
|||
set(TARGET server)
|
||||
set(TARGET llama-server)
|
||||
option(LLAMA_SERVER_VERBOSE "Build verbose logging option for Server" ON)
|
||||
option(LLAMA_SERVER_SSL "Build SSL support for the server" OFF)
|
||||
option(LLAMA_SERVER_SSL "Build SSL support for the server" OFF)
|
||||
|
||||
include_directories(${CMAKE_CURRENT_SOURCE_DIR} ${CMAKE_CURRENT_BINARY_DIR})
|
||||
|
||||
if (MINGW)
|
||||
# fix: https://github.com/ggerganov/llama.cpp/actions/runs/9651004652/job/26617901362?pr=8006
|
||||
add_compile_definitions(_WIN32_WINNT=${GGML_WIN_VER})
|
||||
endif()
|
||||
|
||||
set(TARGET_SRCS
|
||||
server.cpp
|
||||
utils.hpp
|
||||
|
@ -24,6 +31,7 @@ set(PUBLIC_ASSETS
|
|||
prompt-formats.js
|
||||
json-schema-to-grammar.mjs
|
||||
)
|
||||
|
||||
foreach(asset ${PUBLIC_ASSETS})
|
||||
set(input "${CMAKE_CURRENT_SOURCE_DIR}/public/${asset}")
|
||||
set(output "${CMAKE_CURRENT_BINARY_DIR}/${asset}.hpp")
|
||||
|
@ -34,18 +42,23 @@ foreach(asset ${PUBLIC_ASSETS})
|
|||
COMMAND "${CMAKE_COMMAND}" "-DINPUT=${input}" "-DOUTPUT=${output}" -P "${PROJECT_SOURCE_DIR}/scripts/xxd.cmake"
|
||||
)
|
||||
endforeach()
|
||||
|
||||
add_executable(${TARGET} ${TARGET_SRCS})
|
||||
install(TARGETS ${TARGET} RUNTIME)
|
||||
target_compile_definitions(${TARGET} PRIVATE
|
||||
SERVER_VERBOSE=$<BOOL:${LLAMA_SERVER_VERBOSE}>
|
||||
)
|
||||
|
||||
target_link_libraries(${TARGET} PRIVATE common ${CMAKE_THREAD_LIBS_INIT})
|
||||
|
||||
if (LLAMA_SERVER_SSL)
|
||||
find_package(OpenSSL REQUIRED)
|
||||
target_link_libraries(${TARGET} PRIVATE OpenSSL::SSL OpenSSL::Crypto)
|
||||
target_compile_definitions(${TARGET} PRIVATE CPPHTTPLIB_OPENSSL_SUPPORT)
|
||||
endif()
|
||||
|
||||
if (WIN32)
|
||||
TARGET_LINK_LIBRARIES(${TARGET} PRIVATE ws2_32)
|
||||
endif()
|
||||
|
||||
target_compile_features(${TARGET} PRIVATE cxx_std_11)
|
||||
|
|
|
@ -73,6 +73,7 @@ The project is under active development, and we are [looking for feedback and co
|
|||
- `-fa`, `--flash-attn` : enable flash attention (default: disabled).
|
||||
- `-ctk TYPE`, `--cache-type-k TYPE` : KV cache data type for K (default: `f16`, options `f32`, `f16`, `q8_0`, `q4_0`, `q4_1`, `iq4_nl`, `q5_0`, or `q5_1`)
|
||||
- `-ctv TYPE`, `--cache-type-v TYPE` : KV cache type for V (default `f16`, see `-ctk` for options)
|
||||
- `--spm-infill` : Use Suffix/Prefix/Middle pattern for infill (instead of Prefix/Suffix/Middle) as some models prefer this.
|
||||
|
||||
**If compiled with `LLAMA_SERVER_SSL=ON`**
|
||||
- `--ssl-key-file FNAME`: path to file a PEM-encoded SSL private key
|
||||
|
@ -80,26 +81,26 @@ The project is under active development, and we are [looking for feedback and co
|
|||
|
||||
## Build
|
||||
|
||||
`server` is built alongside everything else from the root of the project
|
||||
`llama-server` is built alongside everything else from the root of the project
|
||||
|
||||
- Using `make`:
|
||||
|
||||
```bash
|
||||
make server
|
||||
make llama-server
|
||||
```
|
||||
|
||||
- Using `CMake`:
|
||||
|
||||
```bash
|
||||
cmake -B build
|
||||
cmake --build build --config Release -t server
|
||||
cmake --build build --config Release -t llama-server
|
||||
```
|
||||
|
||||
Binary is at `./build/bin/server`
|
||||
Binary is at `./build/bin/llama-server`
|
||||
|
||||
## Build with SSL
|
||||
|
||||
`server` can also be built with SSL support using OpenSSL 3
|
||||
`llama-server` can also be built with SSL support using OpenSSL 3
|
||||
|
||||
- Using `make`:
|
||||
|
||||
|
@ -107,14 +108,14 @@ The project is under active development, and we are [looking for feedback and co
|
|||
# NOTE: For non-system openssl, use the following:
|
||||
# CXXFLAGS="-I /path/to/openssl/include"
|
||||
# LDFLAGS="-L /path/to/openssl/lib"
|
||||
make LLAMA_SERVER_SSL=true server
|
||||
make LLAMA_SERVER_SSL=true llama-server
|
||||
```
|
||||
|
||||
- Using `CMake`:
|
||||
|
||||
```bash
|
||||
cmake -B build -DLLAMA_SERVER_SSL=ON
|
||||
cmake --build build --config Release -t server
|
||||
cmake --build build --config Release -t llama-server
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
@ -124,13 +125,13 @@ To get started right away, run the following command, making sure to use the cor
|
|||
### Unix-based systems (Linux, macOS, etc.)
|
||||
|
||||
```bash
|
||||
./server -m models/7B/ggml-model.gguf -c 2048
|
||||
./llama-server -m models/7B/ggml-model.gguf -c 2048
|
||||
```
|
||||
|
||||
### Windows
|
||||
|
||||
```powershell
|
||||
server.exe -m models\7B\ggml-model.gguf -c 2048
|
||||
llama-server.exe -m models\7B\ggml-model.gguf -c 2048
|
||||
```
|
||||
|
||||
The above command will start a server that by default listens on `127.0.0.1:8080`.
|
||||
|
@ -629,11 +630,11 @@ bash chat.sh
|
|||
|
||||
### OAI-like API
|
||||
|
||||
The HTTP `server` supports an OAI-like API: https://github.com/openai/openai-openapi
|
||||
The HTTP `llama-server` supports an OAI-like API: https://github.com/openai/openai-openapi
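
As a quick illustration (a minimal sketch, not part of this diff): assuming the server is listening on the default `127.0.0.1:8080` described above, and that the OAI-compatible `/v1/chat/completions` route is enabled, the endpoint can be queried from JavaScript like this.

```js
// Minimal sketch: query the OAI-compatible chat endpoint of a running llama-server.
// The listen address 127.0.0.1:8080 and the /v1/chat/completions route are assumptions
// based on the defaults described in this README; adjust as needed.
const res = await fetch('http://127.0.0.1:8080/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
        messages: [
            { role: 'system', content: 'You are a helpful assistant.' },
            { role: 'user',   content: 'Hello!' },
        ],
    }),
});
const data = await res.json();
console.log(data.choices[0].message.content);
```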
|
||||
|
||||
### API errors
|
||||
|
||||
`server` returns errors in the same format as OAI: https://github.com/openai/openai-openapi
|
||||
`llama-server` returns errors in the same format as OAI: https://github.com/openai/openai-openapi
|
||||
|
||||
Example of an error:
|
||||
|
||||
|
|
|
@ -99,7 +99,7 @@ The `bench.py` script does several steps:
|
|||
It aims to be used in the CI, but you can run it manually:
|
||||
|
||||
```shell
|
||||
LLAMA_SERVER_BIN_PATH=../../../cmake-build-release/bin/server python bench.py \
|
||||
LLAMA_SERVER_BIN_PATH=../../../cmake-build-release/bin/llama-server python bench.py \
|
||||
--runner-label local \
|
||||
--name local \
|
||||
--branch `git rev-parse --abbrev-ref HEAD` \
|
||||
|
|
|
@ -245,7 +245,7 @@ def start_server(args):
|
|||
|
||||
def start_server_background(args):
|
||||
# Start the server
|
||||
server_path = '../../../build/bin/server'
|
||||
server_path = '../../../build/bin/llama-server'
|
||||
if 'LLAMA_SERVER_BIN_PATH' in os.environ:
|
||||
server_path = os.environ['LLAMA_SERVER_BIN_PATH']
|
||||
server_args = [
|
||||
|
|
|
@ -634,12 +634,12 @@ return html`
|
|||
<div>
|
||||
<div class="grammar">
|
||||
<label for="template"></label>
|
||||
<textarea id="grammar" name="grammar" placeholder="Use GBNF or JSON-Scheme + Converter" value="${params.value.grammar}" rows=4 oninput=${updateParams}/>
|
||||
<textarea id="grammar" name="grammar" placeholder="Use GBNF or JSON Schema + Converter" value="${params.value.grammar}" rows=4 oninput=${updateParams}/>
|
||||
</div>
|
||||
<div class="grammar-columns">
|
||||
<div class="json-schema-controls">
|
||||
<input type="text" name="prop-order" placeholder="Order: prop1,prop2,prop3" oninput=${updateGrammarJsonSchemaPropOrder} />
|
||||
<button type="button" class="button-grammar" onclick=${convertJSONSchemaGrammar}>Convert JSON-Scheme</button>
|
||||
<button type="button" class="button-grammar" onclick=${convertJSONSchemaGrammar}>Convert JSON Schema</button>
|
||||
</div>
|
||||
</div>
|
||||
</div>
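
For context, the conversion behind this button is handled by the bundled `json-schema-to-grammar.mjs`. The sketch below shows the rough flow only; the `formatGrammar()` call and the options object shape are assumptions based on that module, and the schema itself is purely illustrative.

```js
// Rough sketch of what the "Convert JSON Schema" button does (assumed API of the
// bundled json-schema-to-grammar.mjs; the schema below is purely illustrative).
import { SchemaConverter } from './json-schema-to-grammar.mjs';

const schema = {
    type: 'object',
    properties: { age: { type: 'integer', minimum: 0, maximum: 120 } },
    required: ['age'],
};

const converter = new SchemaConverter({});
converter.visit(schema, '');
// formatGrammar() is assumed to serialize the accumulated rules into GBNF text,
// which is then placed into the grammar textarea above.
const gbnf = converter.formatGrammar();
console.log(gbnf);
```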
|
||||
|
|
|
@ -24,6 +24,201 @@ function _buildRepetition(itemRule, minItems, maxItems, opts={}) {
|
|||
return minItems === 0 ? `(${result})?` : result;
|
||||
}
|
||||
|
||||
function _generateMinMaxInt(minValue, maxValue, out, decimalsLeft = 16, topLevel = true) {
|
||||
const hasMin = minValue !== null;
|
||||
const hasMax = maxValue !== null;
|
||||
|
||||
function digitRange(fromChar, toChar) {
|
||||
out.push("[");
|
||||
if (fromChar === toChar) {
|
||||
out.push(fromChar);
|
||||
} else {
|
||||
out.push(fromChar);
|
||||
out.push("-");
|
||||
out.push(toChar);
|
||||
}
|
||||
out.push("]");
|
||||
}
|
||||
|
||||
function moreDigits(minDigits, maxDigits) {
|
||||
out.push("[0-9]");
|
||||
if (minDigits === maxDigits && minDigits === 1) {
|
||||
return;
|
||||
}
|
||||
out.push("{");
|
||||
out.push(minDigits.toString());
|
||||
if (maxDigits !== minDigits) {
|
||||
out.push(",");
|
||||
if (maxDigits !== Number.MAX_SAFE_INTEGER) {
|
||||
out.push(maxDigits.toString());
|
||||
}
|
||||
}
|
||||
out.push("}");
|
||||
}
|
||||
|
||||
function uniformRange(fromStr, toStr) {
|
||||
let i = 0;
|
||||
while (i < fromStr.length && fromStr[i] === toStr[i]) {
|
||||
i++;
|
||||
}
|
||||
if (i > 0) {
|
||||
out.push("\"");
|
||||
out.push(fromStr.slice(0, i));
|
||||
out.push("\"");
|
||||
}
|
||||
if (i < fromStr.length) {
|
||||
if (i > 0) {
|
||||
out.push(" ");
|
||||
}
|
||||
const subLen = fromStr.length - i - 1;
|
||||
if (subLen > 0) {
|
||||
const fromSub = fromStr.slice(i + 1);
|
||||
const toSub = toStr.slice(i + 1);
|
||||
const subZeros = "0".repeat(subLen);
|
||||
const subNines = "9".repeat(subLen);
|
||||
|
||||
let toReached = false;
|
||||
out.push("(");
|
||||
if (fromSub === subZeros) {
|
||||
digitRange(fromStr[i], String.fromCharCode(toStr.charCodeAt(i) - 1));
|
||||
out.push(" ");
|
||||
moreDigits(subLen, subLen);
|
||||
} else {
|
||||
out.push("[");
|
||||
out.push(fromStr[i]);
|
||||
out.push("] ");
|
||||
out.push("(");
|
||||
uniformRange(fromSub, subNines);
|
||||
out.push(")");
|
||||
if (fromStr.charCodeAt(i) < toStr.charCodeAt(i) - 1) {
|
||||
out.push(" | ");
|
||||
if (toSub === subNines) {
|
||||
digitRange(String.fromCharCode(fromStr.charCodeAt(i) + 1), toStr[i]);
|
||||
toReached = true;
|
||||
} else {
|
||||
digitRange(String.fromCharCode(fromStr.charCodeAt(i) + 1), String.fromCharCode(toStr.charCodeAt(i) - 1));
|
||||
}
|
||||
out.push(" ");
|
||||
moreDigits(subLen, subLen);
|
||||
}
|
||||
}
|
||||
if (!toReached) {
|
||||
out.push(" | ");
|
||||
digitRange(toStr[i], toStr[i]);
|
||||
out.push(" ");
|
||||
uniformRange(subZeros, toSub);
|
||||
}
|
||||
out.push(")");
|
||||
} else {
|
||||
out.push("[");
|
||||
out.push(fromStr[i]);
|
||||
out.push("-");
|
||||
out.push(toStr[i]);
|
||||
out.push("]");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (hasMin && hasMax) {
|
||||
if (minValue < 0 && maxValue < 0) {
|
||||
out.push("\"-\" (");
|
||||
_generateMinMaxInt(-maxValue, -minValue, out, decimalsLeft, true);
|
||||
out.push(")");
|
||||
return;
|
||||
}
|
||||
|
||||
if (minValue < 0) {
|
||||
out.push("\"-\" (");
|
||||
_generateMinMaxInt(0, -minValue, out, decimalsLeft, true);
|
||||
out.push(") | ");
|
||||
minValue = 0;
|
||||
}
|
||||
|
||||
let minS = minValue.toString();
|
||||
const maxS = maxValue.toString();
|
||||
const minDigits = minS.length;
|
||||
const maxDigits = maxS.length;
|
||||
|
||||
for (let digits = minDigits; digits < maxDigits; digits++) {
|
||||
uniformRange(minS, "9".repeat(digits));
|
||||
minS = "1" + "0".repeat(digits);
|
||||
out.push(" | ");
|
||||
}
|
||||
uniformRange(minS, maxS);
|
||||
return;
|
||||
}
|
||||
|
||||
const lessDecimals = Math.max(decimalsLeft - 1, 1);
|
||||
|
||||
if (hasMin) {
|
||||
if (minValue < 0) {
|
||||
out.push("\"-\" (");
|
||||
_generateMinMaxInt(null, -minValue, out, decimalsLeft, false);
|
||||
out.push(") | [0] | [1-9] ");
|
||||
moreDigits(0, decimalsLeft - 1);
|
||||
} else if (minValue === 0) {
|
||||
if (topLevel) {
|
||||
out.push("[0] | [1-9] ");
|
||||
moreDigits(0, lessDecimals);
|
||||
} else {
|
||||
moreDigits(1, decimalsLeft);
|
||||
}
|
||||
} else if (minValue <= 9) {
|
||||
const c = minValue.toString();
|
||||
const range_start = topLevel ? '1' : '0';
|
||||
if (c > range_start) {
|
||||
digitRange(range_start, String.fromCharCode(c.charCodeAt(0) - 1));
|
||||
out.push(" ");
|
||||
moreDigits(1, lessDecimals);
|
||||
out.push(" | ");
|
||||
}
|
||||
digitRange(c, "9");
|
||||
out.push(" ");
|
||||
moreDigits(0, lessDecimals);
|
||||
} else {
|
||||
const minS = minValue.toString();
|
||||
const length = minS.length;
|
||||
const c = minS[0];
|
||||
|
||||
if (c > "1") {
|
||||
digitRange(topLevel ? "1" : "0", String.fromCharCode(c.charCodeAt(0) - 1));
|
||||
out.push(" ");
|
||||
moreDigits(length, lessDecimals);
|
||||
out.push(" | ");
|
||||
}
|
||||
digitRange(c, c);
|
||||
out.push(" (");
|
||||
_generateMinMaxInt(parseInt(minS.slice(1)), null, out, lessDecimals, false);
|
||||
out.push(")");
|
||||
if (c < "9") {
|
||||
out.push(" | ");
|
||||
digitRange(String.fromCharCode(c.charCodeAt(0) + 1), "9");
|
||||
out.push(" ");
|
||||
moreDigits(length - 1, lessDecimals);
|
||||
}
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (hasMax) {
|
||||
if (maxValue >= 0) {
|
||||
if (topLevel) {
|
||||
out.push("\"-\" [1-9] ");
|
||||
moreDigits(0, lessDecimals);
|
||||
out.push(" | ");
|
||||
}
|
||||
_generateMinMaxInt(0, maxValue, out, decimalsLeft, true);
|
||||
} else {
|
||||
out.push("\"-\" (");
|
||||
_generateMinMaxInt(-maxValue, null, out, decimalsLeft, false);
|
||||
out.push(")");
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
throw new Error("At least one of minValue or maxValue must be set");
|
||||
}
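
To see how this helper is driven, note that the integer branch added to `visit()` later in this diff seeds the output with an opening parenthesis, lets `_generateMinMaxInt` emit the digit-range alternatives, and closes with `) space`. A standalone sketch of that call pattern (the function is module-internal; shown here only for illustration):

```js
// Mirrors how visit() uses _generateMinMaxInt for {"type":"integer","minimum":1,"maximum":300}.
const out = ["("];
_generateMinMaxInt(1, 300, out);   // defaults: decimalsLeft = 16, topLevel = true
out.push(") space");
// out.join('') is a GBNF fragment built from digit-range alternations such as
// [1-9], [1-8] [0-9], [1-2] [0-9]{2}, matching exactly the integers 1..300.
console.log(out.join(''));
```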
|
||||
|
||||
class BuiltinRule {
|
||||
constructor(content, deps) {
|
||||
this.content = content;
|
||||
|
@ -64,7 +259,7 @@ const GRAMMAR_RANGE_LITERAL_ESCAPE_RE = /[\n\r"\]\-\\]/g;
|
|||
const GRAMMAR_LITERAL_ESCAPES = { '\r': '\\r', '\n': '\\n', '"': '\\"', '-': '\\-', ']': '\\]' };
|
||||
|
||||
const NON_LITERAL_SET = new Set('|.()[]{}*+?');
|
||||
const ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = new Set('[]()|{}*+?');
|
||||
const ESCAPED_IN_REGEXPS_BUT_NOT_IN_LITERALS = new Set('^$.[]()|{}*+?');
|
||||
|
||||
export class SchemaConverter {
|
||||
constructor(options) {
|
||||
|
@ -337,6 +532,64 @@ export class SchemaConverter {
|
|||
return this._addRule(name, "\"\\\"\" " + toRule(transform()) + " \"\\\"\" space")
|
||||
}
|
||||
|
||||
_notStrings(strings) {
|
||||
class TrieNode {
|
||||
constructor() {
|
||||
this.children = {};
|
||||
this.isEndOfString = false;
|
||||
}
|
||||
|
||||
insert(str) {
|
||||
let node = this;
|
||||
for (const c of str) {
|
||||
node = node.children[c] = node.children[c] || new TrieNode();
|
||||
}
|
||||
node.isEndOfString = true;
|
||||
}
|
||||
}
|
||||
|
||||
const trie = new TrieNode();
|
||||
for (const s of strings) {
|
||||
trie.insert(s);
|
||||
}
|
||||
|
||||
const charRuleName = this._addPrimitive('char', PRIMITIVE_RULES['char']);
|
||||
const out = ['["] ( '];
|
||||
|
||||
const visit = (node) => {
|
||||
const rejects = [];
|
||||
let first = true;
|
||||
for (const c of Object.keys(node.children).sort()) {
|
||||
const child = node.children[c];
|
||||
rejects.push(c);
|
||||
if (first) {
|
||||
first = false;
|
||||
} else {
|
||||
out.push(' | ');
|
||||
}
|
||||
out.push(`[${c}]`);
|
||||
if (Object.keys(child.children).length > 0) {
|
||||
out.push(' (');
|
||||
visit(child);
|
||||
out.push(')');
|
||||
} else if (child.isEndOfString) {
|
||||
out.push(` ${charRuleName}+`);
|
||||
}
|
||||
}
|
||||
if (Object.keys(node.children).length > 0) {
|
||||
if (!first) {
|
||||
out.push(' | ');
|
||||
}
|
||||
out.push(`[^"${rejects.join('')}] ${charRuleName}*`);
|
||||
}
|
||||
};
|
||||
|
||||
visit(trie);
|
||||
|
||||
out.push(` )${trie.isEndOfString ? '' : '?'} ["] space`);
|
||||
return out.join('');
|
||||
}
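
This helper is what the object rule uses for `additionalProperties` keys: it emits a quoted-string rule that matches any key except the explicitly declared ones. A small sketch of the call-site pattern inside `_buildObjectRule()` (the property names are purely illustrative):

```js
// Illustrative only: inside a SchemaConverter method, build a key rule that accepts
// any JSON string key except the declared properties (as done for additionalProperties).
const declaredProps = ['name', 'age'];   // hypothetical property names
const keyRule = this._addRule('example-additional-k', this._notStrings(declaredProps));
// keyRule now names a GBNF rule matching keys like "email" or "nickname",
// but rejecting the exact strings "name" and "age".
```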
|
||||
|
||||
_resolveRef(ref) {
|
||||
let refName = ref.split('/').pop();
|
||||
if (!(refName in this._rules) && !this._refsBeingResolved.has(ref)) {
|
||||
|
@ -363,11 +616,11 @@ export class SchemaConverter {
|
|||
} else if (schema.oneOf || schema.anyOf) {
|
||||
return this._addRule(ruleName, this._generateUnionRule(name, schema.oneOf || schema.anyOf));
|
||||
} else if (Array.isArray(schemaType)) {
|
||||
return this._addRule(ruleName, this._generateUnionRule(name, schemaType.map(t => ({ type: t }))));
|
||||
return this._addRule(ruleName, this._generateUnionRule(name, schemaType.map(t => ({...schema, type: t}))));
|
||||
} else if ('const' in schema) {
|
||||
return this._addRule(ruleName, this._generateConstantRule(schema.const));
|
||||
return this._addRule(ruleName, this._generateConstantRule(schema.const) + ' space');
|
||||
} else if ('enum' in schema) {
|
||||
const rule = schema.enum.map(v => this._generateConstantRule(v)).join(' | ');
|
||||
const rule = '(' + schema.enum.map(v => this._generateConstantRule(v)).join(' | ') + ') space';
|
||||
return this._addRule(ruleName, rule);
|
||||
} else if ((schemaType === undefined || schemaType === 'object') &&
|
||||
('properties' in schema ||
|
||||
|
@ -404,7 +657,7 @@ export class SchemaConverter {
|
|||
}
|
||||
}
|
||||
|
||||
return this._addRule(ruleName, this._buildObjectRule(properties, required, name, /* additionalProperties= */ false));
|
||||
return this._addRule(ruleName, this._buildObjectRule(properties, required, name, null));
|
||||
} else if ((schemaType === undefined || schemaType === 'array') && ('items' in schema || 'prefixItems' in schema)) {
|
||||
const items = schema.items ?? schema.prefixItems;
|
||||
if (Array.isArray(items)) {
|
||||
|
@ -435,6 +688,24 @@ export class SchemaConverter {
|
|||
const minLen = schema.minLength || 0;
|
||||
const maxLen = schema.maxLength;
|
||||
return this._addRule(ruleName, '"\\\"" ' + _buildRepetition(charRuleName, minLen, maxLen) + ' "\\\"" space');
|
||||
} else if (schemaType === 'integer' && ('minimum' in schema || 'exclusiveMinimum' in schema || 'maximum' in schema || 'exclusiveMaximum' in schema)) {
|
||||
let minValue = null;
|
||||
let maxValue = null;
|
||||
if ('minimum' in schema) {
|
||||
minValue = schema.minimum;
|
||||
} else if ('exclusiveMinimum' in schema) {
|
||||
minValue = schema.exclusiveMinimum + 1;
|
||||
}
|
||||
if ('maximum' in schema) {
|
||||
maxValue = schema.maximum;
|
||||
} else if ('exclusiveMaximum' in schema) {
|
||||
maxValue = schema.exclusiveMaximum - 1;
|
||||
}
|
||||
|
||||
const out = ["("];
|
||||
_generateMinMaxInt(minValue, maxValue, out);
|
||||
out.push(") space");
|
||||
return this._addRule(ruleName, out.join(''));
|
||||
} else if ((schemaType === 'object') || (Object.keys(schema).length === 0)) {
|
||||
return this._addRule(ruleName, this._addPrimitive('object', PRIMITIVE_RULES['object']));
|
||||
} else {
|
||||
|
@ -480,12 +751,19 @@ export class SchemaConverter {
|
|||
const requiredProps = sortedProps.filter(k => required.has(k));
|
||||
const optionalProps = sortedProps.filter(k => !required.has(k));
|
||||
|
||||
if (typeof additionalProperties === 'object' || additionalProperties === true) {
|
||||
if (additionalProperties) {
|
||||
const subName = `${name ?? ''}${name ? '-' : ''}additional`;
|
||||
const valueRule = this.visit(additionalProperties === true ? {} : additionalProperties, `${subName}-value`);
|
||||
const valueRule =
|
||||
additionalProperties != null && typeof additionalProperties === 'object' ? this.visit(additionalProperties, `${subName}-value`)
|
||||
: this._addPrimitive('value', PRIMITIVE_RULES['value']);
|
||||
|
||||
const key_rule =
|
||||
sortedProps.length === 0 ? this._addPrimitive('string', PRIMITIVE_RULES['string'])
|
||||
: this._addRule(`${subName}-k`, this._notStrings(sortedProps));
|
||||
|
||||
propKvRuleNames['*'] = this._addRule(
|
||||
`${subName}-kv`,
|
||||
`${this._addPrimitive('string', PRIMITIVE_RULES['string'])} ":" space ${valueRule}`);
|
||||
`${key_rule} ":" space ${valueRule}`);
|
||||
optionalProps.push('*');
|
||||
}
|
||||
|
||||
|
@ -502,15 +780,11 @@ export class SchemaConverter {
|
|||
const [k, ...rest] = ks;
|
||||
const kvRuleName = propKvRuleNames[k];
|
||||
let res;
|
||||
if (k === '*') {
|
||||
res = this._addRule(
|
||||
`${name ?? ''}${name ? '-' : ''}additional-kvs`,
|
||||
`${kvRuleName} ( "," space ` + kvRuleName + ` )*`
|
||||
)
|
||||
} else if (firstIsOptional) {
|
||||
res = `( "," space ${kvRuleName} )?`;
|
||||
const commaRef = `( "," space ${kvRuleName} )`;
|
||||
if (firstIsOptional) {
|
||||
res = commaRef + (k === '*' ? '*' : '?');
|
||||
} else {
|
||||
res = kvRuleName;
|
||||
res = kvRuleName + (k === '*' ? ' ' + commaRef + '*' : '');
|
||||
}
|
||||
if (rest.length > 0) {
|
||||
res += ' ' + this._addRule(
|
||||
|
|
|
@ -3,6 +3,13 @@
by Humans for All.

## quickstart

To run from the build dir

bin/llama-server -m path/model.gguf --path ../examples/server/public_simplechat

Continue reading for the details.

## overview
@ -14,6 +21,8 @@ own system prompts.
This allows seeing the generated text / ai-model response in oneshot at the end, after it is fully generated,
or potentially as it is being generated, in a streamed manner from the server/ai-model.

![Chat and Settings screens](./simplechat_screens.webp "Chat and Settings screens")

Auto saves the chat session locally as and when the chat is progressing and in turn at a later time when you
open SimpleChat, option is provided to restore the old chat session, if a matching one exists.
@ -44,12 +53,12 @@ http module.
### running using examples/server

bin/server -m path/model.gguf --path ../examples/server/public_simplechat [--port PORT]
./llama-server -m path/model.gguf --path examples/server/public_simplechat [--port PORT]

### running using python3's server module

first run examples/server
* bin/server -m path/model.gguf
* ./llama-server -m path/model.gguf

next run this web front end in examples/server/public_simplechat
* cd ../examples/server/public_simplechat
@ -170,17 +179,23 @@ It is attached to the document object. Some of these can also be updated using t
The histogram/freq based trimming logic is currently tuned for english language wrt its
is-it-a-alphabetic|numeral-char regex match logic.

chatRequestOptions - maintains the list of options/fields to send along with chat request,
apiRequestOptions - maintains the list of options/fields to send along with api request,
irrespective of whether /chat/completions or /completions endpoint.

If you want to add additional options/fields to send to the server/ai-model, and or
modify the existing options value or remove them, for now you can update this global var
using browser's development-tools/console.

For string and numeric fields in chatRequestOptions, including even those added by a user
at runtime by directly modifying gMe.chatRequestOptions, setting ui entries will be auto
For string, numeric and boolean fields in apiRequestOptions, including even those added by a
user at runtime by directly modifying gMe.apiRequestOptions, setting ui entries will be auto
created.

cache_prompt option supported by example/server is allowed to be controlled by user, so that
any caching supported wrt system-prompt and chat history, if usable can get used. When chat
history sliding window is enabled, cache_prompt logic may or may not kick in at the backend
wrt same, based on aspects related to model, positional encoding, attention mechanism et al.
However system prompt should ideally get the benefit of caching.

headers - maintains the list of http headers sent when request is made to the server. By default
Content-Type is set to application/json. Additionally Authorization entry is provided, which can
be set if needed using the settings ui.
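For example, a minimal console sketch (assuming the page exposes the `gMe` global described above; the field values are only illustrative) of adding, changing or dropping request fields at runtime:

```javascript
// Run in the browser's devtools console while SimpleChat is open.
// gMe.apiRequestOptions is the request-fields object described above.
gMe.apiRequestOptions["temperature"] = 0.3;   // tweak an existing field
gMe.apiRequestOptions["cache_prompt"] = true; // let the server reuse cached prompt state
delete gMe.apiRequestOptions["max_tokens"];   // or drop a field entirely
```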
@ -197,10 +212,10 @@ It is attached to the document object. Some of these can also be updated using t
>0 : Send the latest chat history from the latest system prompt, limited to specified cnt.

By using gMe's iRecentUserMsgCnt and chatRequestOptions.max_tokens one can try to control the
implications of loading of the ai-model's context window by chat history, wrt chat response to
some extent in a simple crude way. You may also want to control the context size enabled when
the server loads ai-model, on the server end.
By using gMe's iRecentUserMsgCnt and apiRequestOptions.max_tokens/n_predict one can try to control
the implications of loading of the ai-model's context window by chat history, wrt chat response to
some extent in a simple crude way. You may also want to control the context size enabled when the
server loads ai-model, on the server end.

Sometimes the browser may be stubborn with caching of the file, so your updates to html/css/js
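As a concrete sketch (values are only examples; assumes `max_tokens` maps to /chat/completions and `n_predict` to /completions as described above), limiting both the history sent and the response length from the console:

```javascript
// Send only the most recent user messages (plus system prompt) and cap the response length.
gMe.iRecentUserMsgCnt = 2;                 // keep just the last 2 user msgs worth of history
gMe.apiRequestOptions["n_predict"] = 256;  // response cap for /completions
gMe.apiRequestOptions["max_tokens"] = 256; // response cap for /chat/completions
```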
@ -237,12 +252,12 @@ also be started with a model context size of 1k or more, to be on safe side.
internal n_predict, for now add the same here on the client side, maybe later add max_tokens
to /completions endpoint handling code on server side.

NOTE: One may want to experiment with frequency/presence penalty fields in chatRequestOptions
wrt the set of fields sent to server along with the user query. To check how the model behaves
NOTE: One may want to experiment with frequency/presence penalty fields in apiRequestOptions
wrt the set of fields sent to server along with the user query, to check how the model behaves
wrt repetitions in general in the generated text response.

An end-user can change this behaviour by editing gMe from browser's devel-tool/console or by
using the providing settings ui.
using the provided settings ui (for settings exposed through the ui).

### OpenAi / Equivalent API WebService
@ -253,7 +268,7 @@ for a minimal chatting experimentation by setting the below.
* the baseUrl in settings ui
  * https://api.openai.com/v1 or similar

* Wrt request body - gMe.chatRequestOptions
* Wrt request body - gMe.apiRequestOptions
  * model (settings ui)
  * any additional fields if required in future
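A short sketch (the key and model name are placeholders) of pointing the client at an OpenAI-compatible service from the console, using the `headers` and `apiRequestOptions` globals shown in this diff; the baseUrl itself is set through the settings ui as noted above:

```javascript
// Placeholders: substitute your own API key and model name.
gMe.headers["Authorization"] = "Bearer YOUR_OPENAI_API_KEY";
gMe.apiRequestOptions["model"] = "gpt-3.5-turbo";
```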
@ -222,8 +222,8 @@ class SimpleChat {
* @param {Object} obj
*/
request_jsonstr_extend(obj) {
for(let k in gMe.chatRequestOptions) {
obj[k] = gMe.chatRequestOptions[k];
for(let k in gMe.apiRequestOptions) {
obj[k] = gMe.apiRequestOptions[k];
}
if (gMe.bStream) {
obj["stream"] = true;
@ -740,11 +740,12 @@ class Me {
"Authorization": "", // Authorization: Bearer OPENAI_API_KEY
}
// Add needed fields wrt json object to be sent wrt LLM web services completions endpoint.
this.chatRequestOptions = {
this.apiRequestOptions = {
"model": "gpt-3.5-turbo",
"temperature": 0.7,
"max_tokens": 1024,
"n_predict": 1024,
"cache_prompt": false,
//"frequency_penalty": 1.2,
//"presence_penalty": 1.2,
};
@ -800,51 +801,55 @@ class Me {
ui.el_create_append_p(`bStream:${this.bStream}`, elDiv);

ui.el_create_append_p(`bTrimGarbage:${this.bTrimGarbage}`, elDiv);

ui.el_create_append_p(`ApiEndPoint:${this.apiEP}`, elDiv);

ui.el_create_append_p(`iRecentUserMsgCnt:${this.iRecentUserMsgCnt}`, elDiv);

ui.el_create_append_p(`bCompletionFreshChatAlways:${this.bCompletionFreshChatAlways}`, elDiv);

ui.el_create_append_p(`bCompletionInsertStandardRolePrefix:${this.bCompletionInsertStandardRolePrefix}`, elDiv);

ui.el_create_append_p(`bTrimGarbage:${this.bTrimGarbage}`, elDiv);

ui.el_create_append_p(`iRecentUserMsgCnt:${this.iRecentUserMsgCnt}`, elDiv);

ui.el_create_append_p(`ApiEndPoint:${this.apiEP}`, elDiv);

}

ui.el_create_append_p(`chatRequestOptions:${JSON.stringify(this.chatRequestOptions, null, " - ")}`, elDiv);
ui.el_create_append_p(`apiRequestOptions:${JSON.stringify(this.apiRequestOptions, null, " - ")}`, elDiv);
ui.el_create_append_p(`headers:${JSON.stringify(this.headers, null, " - ")}`, elDiv);

}

/**
* Auto create ui input elements for fields in ChatRequestOptions
* Auto create ui input elements for fields in apiRequestOptions
* Currently supports text and number field types.
* @param {HTMLDivElement} elDiv
*/
show_settings_chatrequestoptions(elDiv) {
show_settings_apirequestoptions(elDiv) {
let typeDict = {
"string": "text",
"number": "number",
};
let fs = document.createElement("fieldset");
let legend = document.createElement("legend");
legend.innerText = "ChatRequestOptions";
legend.innerText = "ApiRequestOptions";
fs.appendChild(legend);
elDiv.appendChild(fs);
for(const k in this.chatRequestOptions) {
let val = this.chatRequestOptions[k];
for(const k in this.apiRequestOptions) {
let val = this.apiRequestOptions[k];
let type = typeof(val);
if (!((type == "string") || (type == "number"))) {
continue;
if (((type == "string") || (type == "number"))) {
let inp = ui.el_creatediv_input(`Set${k}`, k, typeDict[type], this.apiRequestOptions[k], (val)=>{
if (type == "number") {
val = Number(val);
}
this.apiRequestOptions[k] = val;
});
fs.appendChild(inp.div);
} else if (type == "boolean") {
let bbtn = ui.el_creatediv_boolbutton(`Set{k}`, k, {true: "true", false: "false"}, val, (userVal)=>{
this.apiRequestOptions[k] = userVal;
});
fs.appendChild(bbtn.div);
}
let inp = ui.el_creatediv_input(`Set${k}`, k, typeDict[type], this.chatRequestOptions[k], (val)=>{
if (type == "number") {
val = Number(val);
}
this.chatRequestOptions[k] = val;
});
fs.appendChild(inp.div);
}
}
@ -870,6 +875,23 @@ class Me {
});
elDiv.appendChild(bb.div);

bb = ui.el_creatediv_boolbutton("SetTrimGarbage", "TrimGarbage", {true: "[+] yes trim", false: "[-] dont trim"}, this.bTrimGarbage, (val)=>{
this.bTrimGarbage = val;
});
elDiv.appendChild(bb.div);

this.show_settings_apirequestoptions(elDiv);

let sel = ui.el_creatediv_select("SetApiEP", "ApiEndPoint", ApiEP.Type, this.apiEP, (val)=>{
this.apiEP = ApiEP.Type[val];
});
elDiv.appendChild(sel.div);

sel = ui.el_creatediv_select("SetChatHistoryInCtxt", "ChatHistoryInCtxt", this.sRecentUserMsgCnt, this.iRecentUserMsgCnt, (val)=>{
this.iRecentUserMsgCnt = this.sRecentUserMsgCnt[val];
});
elDiv.appendChild(sel.div);

bb = ui.el_creatediv_boolbutton("SetCompletionFreshChatAlways", "CompletionFreshChatAlways", {true: "[+] yes fresh", false: "[-] no, with history"}, this.bCompletionFreshChatAlways, (val)=>{
this.bCompletionFreshChatAlways = val;
});
@ -880,23 +902,6 @@ class Me {
});
elDiv.appendChild(bb.div);

bb = ui.el_creatediv_boolbutton("SetTrimGarbage", "TrimGarbage", {true: "[+] yes trim", false: "[-] dont trim"}, this.bTrimGarbage, (val)=>{
this.bTrimGarbage = val;
});
elDiv.appendChild(bb.div);

let sel = ui.el_creatediv_select("SetChatHistoryInCtxt", "ChatHistoryInCtxt", this.sRecentUserMsgCnt, this.iRecentUserMsgCnt, (val)=>{
this.iRecentUserMsgCnt = this.sRecentUserMsgCnt[val];
});
elDiv.appendChild(sel.div);

sel = ui.el_creatediv_select("SetApiEP", "ApiEndPoint", ApiEP.Type, this.apiEP, (val)=>{
this.apiEP = ApiEP.Type[val];
});
elDiv.appendChild(sel.div);

this.show_settings_chatrequestoptions(elDiv);

}

}
BIN examples/server/public_simplechat/simplechat_screens.webp (new file, 21 KiB; binary content not shown)
@ -1594,7 +1594,7 @@ struct server_context {
} else {
std::string prompt;
if (task.data.contains("prompt") && task.data.at("prompt").is_string()) {
json_value(task.data, "prompt", std::string());
prompt = json_value(task.data, "prompt", std::string());
}

slot = get_available_slot(prompt);
@ -2020,6 +2020,7 @@ struct server_context {
slot.t_start_generation = 0;

if (slot.infill) {
const bool add_bos = llama_should_add_bos_token(model);
bool suff_rm_leading_spc = true;
if (params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
params.input_suffix.erase(0, 1);
@ -2035,11 +2036,21 @@ struct server_context {
}

prefix_tokens.insert(prefix_tokens.begin(), llama_token_prefix(model));
prefix_tokens.insert(prefix_tokens.begin(), llama_token_bos(model)); // always add BOS
prefix_tokens.insert(prefix_tokens.end(), llama_token_suffix(model));
prefix_tokens.insert(prefix_tokens.end(), suffix_tokens.begin(), suffix_tokens.end());
prefix_tokens.push_back(llama_token_middle(model));
prompt_tokens = prefix_tokens;
suffix_tokens.insert(suffix_tokens.begin(), llama_token_suffix(model));

auto embd_inp = params.spm_infill ? suffix_tokens : prefix_tokens;
auto embd_end = params.spm_infill ? prefix_tokens : suffix_tokens;
if (add_bos) {
embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
}
embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());

const llama_token middle_token = llama_token_middle(model);
if (middle_token >= 0) {
embd_inp.push_back(middle_token);
}

prompt_tokens = embd_inp;
} else {
prompt_tokens = tokenize(slot.prompt, system_prompt.empty()); // add BOS if there isn't system prompt
}
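For reference, a schematic sketch (in JavaScript, purely illustrative of the ordering implemented in the C++ hunk above, not actual server code) of how the infill prompt is assembled for the two layouts:

```javascript
// Schematic only: mirrors the token ordering in the hunk above.
// prefixTokens/suffixTokens are assumed to already start with their FIM prefix/suffix markers.
function buildInfillPrompt(prefixTokens, suffixTokens, { spmInfill, addBos, bosToken, middleToken }) {
    // SPM layout puts the suffix block first, the default (PSM) layout puts the prefix block first.
    let inp = spmInfill ? [...suffixTokens] : [...prefixTokens];
    const end = spmInfill ? prefixTokens : suffixTokens;
    if (addBos) {
        inp.unshift(bosToken);      // BOS only when the model expects one
    }
    inp = inp.concat(end);
    if (middleToken >= 0) {
        inp.push(middleToken);      // some models have no FIM middle token
    }
    return inp;
}
```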
@ -2606,17 +2617,9 @@ int main(int argc, char ** argv) {

// print sample chat example to make it clear which template is used
{
json chat;
chat.push_back({{"role", "system"}, {"content", "You are a helpful assistant"}});
chat.push_back({{"role", "user"}, {"content", "Hello"}});
chat.push_back({{"role", "assistant"}, {"content", "Hi there"}});
chat.push_back({{"role", "user"}, {"content", "How are you?"}});

const std::string chat_example = format_chat(ctx_server.model, params.chat_template, chat);

LOG_INFO("chat template", {
{"chat_example", chat_example},
{"built_in", params.chat_template.empty()},
{"chat_example", llama_chat_format_example(ctx_server.model, params.chat_template)},
{"built_in", params.chat_template.empty()},
});
}
@ -27,10 +27,8 @@ To mitigate it, you can increase values in `n_predict`, `kv_size`.

```shell
cd ../../..
mkdir build
cd build
cmake -DLLAMA_CURL=ON ../
cmake --build . --target server
cmake -B build -DLLAMA_CURL=ON
cmake --build build --target llama-server
```

2. Start the test: `./tests.sh`
@ -40,7 +38,7 @@ It's possible to override some scenario steps values with environment variables:
| variable                 | description                                                                                      |
|--------------------------|--------------------------------------------------------------------------------------------------|
| `PORT`                   | `context.server_port` to set the listening port of the server during scenario, default: `8080`  |
| `LLAMA_SERVER_BIN_PATH`  | to change the server binary path, default: `../../../build/bin/server`                          |
| `LLAMA_SERVER_BIN_PATH`  | to change the server binary path, default: `../../../build/bin/llama-server`                    |
| `DEBUG`                  | "ON" to enable steps and server verbose mode `--verbose`                                         |
| `SERVER_LOG_FORMAT_JSON` | if set switch server logs to json format                                                         |
| `N_GPU_LAYERS`           | number of model layers to offload to VRAM `-ngl --n-gpu-layers`                                  |
@ -82,7 +82,7 @@ Feature: llama.cpp server

Examples: Prompts
| response_format                                                      | n_predicted | re_content  |
| {"type": "json_object", "schema": {"const": "42"}}                   | 5           | "42"        |
| {"type": "json_object", "schema": {"const": "42"}}                   | 6           | "42"        |
| {"type": "json_object", "schema": {"items": [{"type": "integer"}]}}  | 10          | \[ -300 \]  |
| {"type": "json_object"}                                              | 10          | \{ " Jacky. |
@ -1272,9 +1272,9 @@ def context_text(context):

def start_server_background(context):
    if os.name == 'nt':
        context.server_path = '../../../build/bin/Release/server.exe'
        context.server_path = '../../../build/bin/Release/llama-server.exe'
    else:
        context.server_path = '../../../build/bin/server'
        context.server_path = '../../../build/bin/llama-server'
    if 'LLAMA_SERVER_BIN_PATH' in os.environ:
        context.server_path = os.environ['LLAMA_SERVER_BIN_PATH']
    server_listen_addr = context.server_fqdn
Some files were not shown because too many files have changed in this diff.