Merge branch 'LostRuins:concedo' into concedo

commit 4e46673f80
40 changed files with 6555 additions and 2099 deletions
@@ -44,6 +44,7 @@ endif()
 option(LLAMA_CUBLAS "llama: use cuBLAS" ON)
 set(LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels")
 set(LLAMA_CUDA_DMMV_Y "1" CACHE STRING "llama: y block size for dmmv CUDA kernels")
+set(LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels")
 option(LLAMA_CUDA_DMMV_F16 "llama: use 16 bit floats for dmmv CUDA kernels" OFF)
 set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for Q2_K/Q6_K")
 option(LLAMA_K_QUANTS "llama: use k-quants" ON)

@@ -76,8 +77,11 @@ if (LLAMA_CUBLAS)
     set(GGML_V2_LEGACY_CUDA_SOURCES otherarch/ggml_v2-cuda-legacy.cu otherarch/ggml_v2-cuda-legacy.h)

     add_compile_definitions(GGML_USE_CUBLAS)
+    add_compile_definitions(GGML_CUDA_FORCE_DMMV) #non dmmv broken for me

     add_compile_definitions(GGML_CUDA_DMMV_X=${LLAMA_CUDA_DMMV_X})
     add_compile_definitions(GGML_CUDA_DMMV_Y=${LLAMA_CUDA_DMMV_Y})
+    add_compile_definitions(GGML_CUDA_MMV_Y=${LLAMA_CUDA_MMV_Y})
     if (LLAMA_CUDA_DMMV_F16)
         add_compile_definitions(GGML_CUDA_DMMV_F16)
     endif()

@@ -89,6 +93,15 @@ if (LLAMA_CUBLAS)
         set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
     endif()

+    if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
+        if (LLAMA_CUDA_DMMV_F16)
+            set(CMAKE_CUDA_ARCHITECTURES "61") # needed for f16 CUDA intrinsics
+        else()
+            set(CMAKE_CUDA_ARCHITECTURES "52;61") # lowest CUDA 12 standard + lowest for integer intrinsics
+        endif()
+    endif()
+    message(STATUS "Using CUDA architectures: ${CMAKE_CUDA_ARCHITECTURES}")
+
 else()
     message(WARNING "cuBLAS not found")
 endif()

Makefile | 4
@@ -144,16 +144,18 @@ ifdef LLAMA_CUBLAS
 CUBLASLD_FLAGS = -lcublas -lculibos -lcudart -lcublasLt -lpthread -ldl -lrt -L/usr/local/cuda/lib64 -L/opt/cuda/lib64 -L$(CUDA_PATH)/targets/x86_64-linux/lib
 CUBLAS_OBJS = ggml-cuda.o ggml_v2-cuda.o ggml_v2-cuda-legacy.o
 NVCC = nvcc
-NVCCFLAGS = --forward-unknown-to-host-compiler -arch=native
+NVCCFLAGS = --forward-unknown-to-host-compiler -arch=native -DGGML_CUDA_FORCE_DMMV
 ifdef LLAMA_CUDA_DMMV_X
 NVCCFLAGS += -DGGML_CUDA_DMMV_X=$(LLAMA_CUDA_DMMV_X)
 else
 NVCCFLAGS += -DGGML_CUDA_DMMV_X=32
 endif # LLAMA_CUDA_DMMV_X
 ifdef LLAMA_CUDA_DMMV_Y
+NVCCFLAGS += -DGGML_CUDA_MMV_Y=$(LLAMA_CUDA_MMV_Y)
 NVCCFLAGS += -DGGML_CUDA_DMMV_Y=$(LLAMA_CUDA_DMMV_Y)
 else
 NVCCFLAGS += -DGGML_CUDA_DMMV_Y=1
+NVCCFLAGS += -DGGML_CUDA_MMV_Y=1
 endif # LLAMA_CUDA_DMMV_Y
 ifdef LLAMA_CUDA_DMMV_F16
 NVCCFLAGS += -DGGML_CUDA_DMMV_F16

@@ -154,9 +154,15 @@ class Params:
         # try transformer naming first
         if "model.layers.0.self_attn.q_proj.weight" in model:
             n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
+        elif "model.layers.0.self_attn.W_pack.weight" in model: # next: try baichuan naming
+            n_layer=next(i for i in itertools.count() if f"model.layers.{i}.self_attn.W_pack.weight" not in model)
         else:
             n_layer=next(i for i in itertools.count() if f"layers.{i}.attention.wq.weight" not in model)

+        if n_layer < 1:
+            raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n"
+                            "Suggestion: provide 'config.json' of the model in the same directory containing model files.")
+
         n_head=n_embd // 128 # guessed

         return Params(

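The layer-count guessing above leans on a compact probing idiom: scan `itertools.count()` and stop at the first index whose tensor name is missing from the checkpoint. A minimal, self-contained sketch of the same idiom (the toy weight dict below is hypothetical, standing in for a loaded model):

```python
import itertools

# Hypothetical stand-in for a loaded checkpoint: tensor name -> weight.
model = {f"model.layers.{i}.self_attn.q_proj.weight": None for i in range(3)}

# The first index whose key is absent equals the number of layers present.
n_layer = next(i for i in itertools.count()
               if f"model.layers.{i}.self_attn.q_proj.weight" not in model)
assert n_layer == 3
```
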
@@ -7,7 +7,7 @@
 cd `dirname $0`
 cd ..

-./main -m ./models/ggml-alpaca-7b-q4.bin \
+./main -m ./models/alpaca.13b.ggmlv3.q8_0.bin \
        --color \
        -f ./prompts/alpaca.txt \
        --ctx_size 2048 \

@@ -31,7 +31,7 @@ struct gpt_params {
     int32_t n_gpu_layers = 0; // number of layers to store in VRAM
     int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
     float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
-    bool low_vram = 0; // if true, reduce VRAM usage at the cost of performance
+    int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.

     // sampling parameters
     std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens

@@ -59,6 +59,7 @@ struct gpt_params {
     std::string lora_adapter = ""; // lora adapter path
     std::string lora_base = ""; // base model path for the lora adapter

+    bool low_vram = false; // if true, reduce VRAM usage at the cost of performance
     bool memory_f16 = true; // use f16 instead of f32 for memory kv
     bool random_prompt = false; // do not randomize prompt if none provided
     bool use_color = false; // use color to distinguish generations and inputs

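The new `n_probs` field is the knob behind top-token probabilities in the HTTP server. A hedged sketch of exercising it over the server's `/completion` endpoint; the JSON field name mirrors the struct member above, and since the response shape is not part of this diff, the sketch simply dumps whatever comes back:

```python
import json
import requests

# Ask a locally running llama.cpp server (default host/port assumed) for a
# short completion plus, via n_probs, the top-3 candidate-token probabilities.
resp = requests.post(
    "http://127.0.0.1:8080/completion",
    data=json.dumps({
        "prompt": "The capital of France is",
        "n_predict": 4,
        "n_probs": 3,  # mirrors gpt_params::n_probs
    }),
)
# The response shape is not shown in this diff, so just inspect it.
print(json.dumps(resp.json(), indent=2))
```
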
@@ -29,7 +29,7 @@ struct MyModel* create_mymodel(int argc, char ** argv) {

     fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);

-    if (params.seed < 0) {
+    if (params.seed == LLAMA_DEFAULT_SEED) {
         params.seed = time(NULL);
     }
     fprintf(stderr, "%s: seed = %d\n", __func__, params.seed);

@@ -18,7 +18,7 @@ int main(int argc, char ** argv) {
     params.embedding = true;

     if (params.n_ctx > 2048) {
-        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
+        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
                 "expect poor results\n", __func__, params.n_ctx);
     }

@@ -85,7 +85,7 @@ int main(int argc, char ** argv) {
     }

     if (params.n_ctx > 2048) {
-        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
+        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
                 "expect poor results\n", __func__, params.n_ctx);
     } else if (params.n_ctx < 8) {
         fprintf(stderr, "%s: warning: minimum context size is 8, using minimum size.\n", __func__);

@@ -130,7 +130,7 @@ int main(int argc, char ** argv) {
     params.n_batch = std::min(params.n_batch, params.n_ctx);

     if (params.n_ctx > 2048) {
-        fprintf(stderr, "%s: warning: model does not support context sizes greater than 2048 tokens (%d specified);"
+        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
                 "expect poor results\n", __func__, params.n_ctx);
     }

@@ -147,7 +147,7 @@ void test_roundtrip_on_chunk(
     const ggml_tensor * layer,
     int64_t offset,
     int64_t chunk_size,
-    const quantize_fns_t & qfns,
+    const ggml_type_traits_t & qfns,
     bool use_reference,
     float * input_scratch,
     char * quantized_scratch,

@@ -163,11 +163,11 @@ void test_roundtrip_on_chunk(
     }

     if (use_reference) {
-        qfns.quantize_row_q_reference(input_scratch, quantized_scratch, chunk_size);
+        qfns.from_float_reference(input_scratch, quantized_scratch, chunk_size);
     } else {
-        qfns.quantize_row_q(input_scratch, quantized_scratch, chunk_size);
+        qfns.from_float(input_scratch, quantized_scratch, chunk_size);
     }
-    qfns.dequantize_row_q(quantized_scratch, output_scratch, chunk_size);
+    qfns.to_float(quantized_scratch, output_scratch, chunk_size);

     update_error_stats(chunk_size, input_scratch, output_scratch, stats);
 }

@@ -177,7 +177,7 @@ void test_roundtrip_on_chunk(
 void test_roundtrip_on_layer(
     std::string & name,
     bool print_layer_stats,
-    const quantize_fns_t & qfns,
+    const ggml_type_traits_t & qfns,
     bool use_reference,
     const ggml_tensor * layer,
     std::vector<float> & input_scratch,

@@ -388,8 +388,8 @@ int main(int argc, char ** argv) {
         if (!params.include_types.empty() && std::find(params.include_types.begin(), params.include_types.end(), i) == params.include_types.end()) {
             continue;
         }
-        quantize_fns_t qfns = ggml_internal_get_quantize_fn(i);
-        if (qfns.quantize_row_q && qfns.dequantize_row_q) {
+        ggml_type_traits_t qfns = ggml_internal_get_type_traits(type);
+        if (qfns.from_float && qfns.to_float) {
             if (params.verbose) {
                 printf("testing %s ...\n", ggml_type_name(type));
             }

@@ -1,13 +1,13 @@
 # llama.cpp/example/server

-This example demonstrates a simple HTTP API server to interact with llama.cpp.
+This example demonstrates a simple HTTP API server and a simple web front end to interact with llama.cpp.

 Command line options:

 - `--threads N`, `-t N`: Set the number of threads to use during computation.
 - `-m FNAME`, `--model FNAME`: Specify the path to the LLaMA model file (e.g., `models/7B/ggml-model.bin`).
 - `-m ALIAS`, `--alias ALIAS`: Set an alias for the model. The alias will be returned in API responses.
-- `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference.
+- `-c N`, `--ctx-size N`: Set the size of the prompt context. The default is 512, but LLaMA models were built with a context of 2048, which will provide better results for longer input/inference. The size may differ in other models; for example, baichuan models were built with a context of 4096.
 - `-ngl N`, `--n-gpu-layers N`: When compiled with appropriate support (currently CLBlast or cuBLAS), this option allows offloading some layers to the GPU for computation. Generally results in increased performance.
 - `-mg i, --main-gpu i`: When using multiple GPUs this option controls which GPU is used for small tensors for which the overhead of splitting the computation across all GPUs is not worthwhile. The GPU in question will use slightly more VRAM to store a scratch buffer for temporary results. By default GPU 0 is used. Requires cuBLAS.
 - `-ts SPLIT, --tensor-split SPLIT`: When using multiple GPUs this option controls how large tensors should be split across all GPUs. `SPLIT` is a comma-separated list of non-negative values that assigns the proportion of data that each GPU should get in order. For example, "3,2" will assign 60% of the data to GPU 0 and 40% to GPU 1. By default the data is split in proportion to VRAM but this may not be optimal for performance. Requires cuBLAS.

@@ -21,24 +21,22 @@ Command line options:
 - `-to N`, `--timeout N`: Server read/write timeout in seconds. Default `600`.
 - `--host`: Set the hostname or ip address to listen. Default `127.0.0.1`.
 - `--port`: Set the port to listen. Default: `8080`.
+- `--path`: path from which to serve static files (default examples/server/public)
 - `--embedding`: Enable embedding extraction. Default: disabled.

 ## Build

-Build llama.cpp with server from repository root with either make or CMake.
+server is built alongside everything else from the root of the project

 - Using `make`:

   ```bash
-  LLAMA_BUILD_SERVER=1 make
+  make
   ```

 - Using `CMake`:

   ```bash
-  mkdir build-server
-  cd build-server
-  cmake -DLLAMA_BUILD_SERVER=ON ..
   cmake --build . --config Release
   ```

@@ -59,7 +57,7 @@ server.exe -m models\7B\ggml-model.bin -c 2048
 ```

 The above command will start a server that by default listens on `127.0.0.1:8080`.
-You can consume the endpoints with Postman or NodeJS with axios library.
+You can consume the endpoints with Postman or NodeJS with the axios library. You can visit the web front end at the same URL.

 ## Testing with CURL

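Besides Postman or axios, any HTTP client works. A minimal Python sketch against a server running on the default address; the `prompt`/`n_predict` request fields are assumed here to match the server's `/completion` parameters, which are not part of this excerpt:

```python
import json
import requests

# Request a completion from a server listening on the default 127.0.0.1:8080
# and print the generated text from the JSON response.
resp = requests.post(
    "http://127.0.0.1:8080/completion",
    data=json.dumps({"prompt": "Building a website can be done in 10 simple steps:",
                     "n_predict": 64}),
)
print(resp.json()["content"])
```
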
@@ -190,3 +188,49 @@ Run with bash:
 ```sh
 bash chat.sh
 ```
+
+### API like OAI
+
+API example using Python Flask: [api_like_OAI.py](api_like_OAI.py)
+This example must be used with server.cpp
+
+```sh
+python api_like_OAI.py
+```
+
+After running the API server, you can use it in Python by setting the API base URL.
+```python
+openai.api_base = "http://<Your api-server IP>:port"
+```
+
+Then you can utilize llama.cpp as an OpenAI-compatible **chat.completion** or **text_completion** API
+
+### Extending or building alternative Web Front End
+
+The default location for the static files is `examples/server/public`. You can extend the front end by running the server binary with `--path` set to `./your-directory` and importing `/completion.js` to get access to the llamaComplete() method.
+
+Read the documentation in `/completion.js` to see convenient ways to access llama.
+
+A simple example is below:
+
+```html
+<html>
+  <body>
+    <pre>
+      <script type="module">
+        import { llama } from '/completion.js'
+
+        const prompt = `### Instruction:
+Write dad jokes, each one paragraph.
+You can use html formatting if needed.
+
+### Response:`
+
+        for await (const chunk of llama(prompt)) {
+          document.write(chunk.data.content)
+        }
+      </script>
+    </pre>
+  </body>
+</html>
+```

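To make the OAI-style usage concrete, here is a hedged sketch using the legacy `openai` Python package (the 0.x API that honors `openai.api_base`), assuming `api_like_OAI.py` is running on its default `127.0.0.1:8081`:

```python
import openai

# Point the legacy (0.x) openai client at the api_like_OAI.py proxy.
# The proxy serves both /chat/completions and /v1/chat/completions,
# so the un-versioned base URL from the README works here.
openai.api_base = "http://127.0.0.1:8081"
openai.api_key = "sk-no-key-required"  # only checked if the proxy was started with --api-key

resp = openai.ChatCompletion.create(
    model="LLaMA_CPP",  # the proxy ignores the model name
    messages=[{"role": "user", "content": "Say hello in one sentence."}],
)
print(resp["choices"][0]["message"]["content"])
```
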
examples/server/api_like_OAI.py | 219 (new executable file)
@@ -0,0 +1,219 @@
+import argparse
+from flask import Flask, jsonify, request, Response
+import urllib.parse
+import requests
+import time
+import json
+
+
+app = Flask(__name__)
+
+parser = argparse.ArgumentParser(description="An example of using server.cpp with a similar API to OAI. It must be used together with server.cpp.")
+parser.add_argument("--chat-prompt", type=str, help="the top prompt in chat completions(default: 'A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')", default='A chat between a curious user and an artificial intelligence assistant. The assistant follows the given rules no matter what.\\n')
+parser.add_argument("--user-name", type=str, help="USER name in chat completions(default: '\\nUSER: ')", default="\\nUSER: ")
+parser.add_argument("--ai-name", type=str, help="ASSISTANT name in chat completions(default: '\\nASSISTANT: ')", default="\\nASSISTANT: ")
+parser.add_argument("--system-name", type=str, help="SYSTEM name in chat completions(default: '\\nASSISTANT's RULE: ')", default="\\nASSISTANT's RULE: ")
+parser.add_argument("--stop", type=str, help="the end of response in chat completions(default: '</s>')", default="</s>")
+parser.add_argument("--llama-api", type=str, help="Set the address of server.cpp in llama.cpp(default: http://127.0.0.1:8080)", default='http://127.0.0.1:8080')
+parser.add_argument("--api-key", type=str, help="Set the api key to allow only few user(default: NULL)", default="")
+parser.add_argument("--host", type=str, help="Set the ip address to listen.(default: 127.0.0.1)", default='127.0.0.1')
+parser.add_argument("--port", type=int, help="Set the port to listen.(default: 8081)", default=8081)
+
+args = parser.parse_args()
+
+def is_present(json, key):
+    try:
+        buf = json[key]
+    except KeyError:
+        return False
+    return True
+
+
+
+#convert chat to prompt
+def convert_chat(messages):
+    prompt = "" + args.chat_prompt.replace("\\n", "\n")
+
+    system_n = args.system_name.replace("\\n", "\n")
+    user_n = args.user_name.replace("\\n", "\n")
+    ai_n = args.ai_name.replace("\\n", "\n")
+    stop = args.stop.replace("\\n", "\n")
+
+
+    for line in messages:
+        if (line["role"] == "system"):
+            prompt += f"{system_n}{line['content']}"
+        if (line["role"] == "user"):
+            prompt += f"{user_n}{line['content']}"
+        if (line["role"] == "assistant"):
+            prompt += f"{ai_n}{line['content']}{stop}"
+    prompt += ai_n.rstrip()
+
+    return prompt
+
+def make_postData(body, chat=False, stream=False):
+    postData = {}
+    if (chat):
+        postData["prompt"] = convert_chat(body["messages"])
+    else:
+        postData["prompt"] = body["prompt"]
+    if(is_present(body, "temperature")): postData["temperature"] = body["temperature"]
+    if(is_present(body, "top_k")): postData["top_k"] = body["top_k"]
+    if(is_present(body, "top_p")): postData["top_p"] = body["top_p"]
+    if(is_present(body, "max_tokens")): postData["n_predict"] = body["max_tokens"]
+    if(is_present(body, "presence_penalty")): postData["presence_penalty"] = body["presence_penalty"]
+    if(is_present(body, "frequency_penalty")): postData["frequency_penalty"] = body["frequency_penalty"]
+    if(is_present(body, "repeat_penalty")): postData["repeat_penalty"] = body["repeat_penalty"]
+    if(is_present(body, "mirostat")): postData["mirostat"] = body["mirostat"]
+    if(is_present(body, "mirostat_tau")): postData["mirostat_tau"] = body["mirostat_tau"]
+    if(is_present(body, "mirostat_eta")): postData["mirostat_eta"] = body["mirostat_eta"]
+    if(is_present(body, "seed")): postData["seed"] = body["seed"]
+    if(is_present(body, "logit_bias")): postData["logit_bias"] = [[int(token), body["logit_bias"][token]] for token in body["logit_bias"].keys()]
+    if (args.stop != ""):
+        postData["stop"] = [args.stop]
+    else:
+        postData["stop"] = []
+    if(is_present(body, "stop")): postData["stop"] += body["stop"]
+    postData["n_keep"] = -1
+    postData["stream"] = stream
+
+    return postData
+
+def make_resData(data, chat=False, promptToken=[]):
+    resData = {
+        "id": "chatcmpl" if (chat) else "cmpl",
+        "object": "chat.completion" if (chat) else "text_completion",
+        "created": int(time.time()),
+        "truncated": data["truncated"],
+        "model": "LLaMA_CPP",
+        "usage": {
+            "prompt_tokens": data["tokens_evaluated"],
+            "completion_tokens": data["tokens_predicted"],
+            "total_tokens": data["tokens_evaluated"] + data["tokens_predicted"]
+        }
+    }
+    if (len(promptToken) != 0):
+        resData["promptToken"] = promptToken
+    if (chat):
+        #only one choice is supported
+        resData["choices"] = [{
+            "index": 0,
+            "message": {
+                "role": "assistant",
+                "content": data["content"],
+            },
+            "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
+        }]
+    else:
+        #only one choice is supported
+        resData["choices"] = [{
+            "text": data["content"],
+            "index": 0,
+            "logprobs": None,
+            "finish_reason": "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
+        }]
+    return resData
+
+def make_resData_stream(data, chat=False, time_now = 0, start=False):
+    resData = {
+        "id": "chatcmpl" if (chat) else "cmpl",
+        "object": "chat.completion.chunk" if (chat) else "text_completion.chunk",
+        "created": time_now,
+        "model": "LLaMA_CPP",
+        "choices": [
+            {
+                "finish_reason": None,
+                "index": 0
+            }
+        ]
+    }
+    if (chat):
+        if (start):
+            resData["choices"][0]["delta"] = {
+                "role": "assistant"
+            }
+        else:
+            resData["choices"][0]["delta"] = {
+                "content": data["content"]
+            }
+            if (data["stop"]):
+                resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"
+    else:
+        resData["choices"][0]["text"] = data["content"]
+        if (data["stop"]):
+            resData["choices"][0]["finish_reason"] = "stop" if (data["stopped_eos"] or data["stopped_word"]) else "length"

+    return resData
+
+
+@app.route('/chat/completions', methods=['POST'])
+@app.route('/v1/chat/completions', methods=['POST'])
+def chat_completions():
+    if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key):
+        return Response(status=403)
+    body = request.get_json()
+    stream = False
+    tokenize = False
+    if(is_present(body, "stream")): stream = body["stream"]
+    if(is_present(body, "tokenize")): tokenize = body["tokenize"]
+    postData = make_postData(body, chat=True, stream=stream)
+
+    promptToken = []
+    if (tokenize):
+        tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json()
+        promptToken = tokenData["tokens"]
+
+    if (not stream):
+        data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData))
+        print(data.json())
+        resData = make_resData(data.json(), chat=True, promptToken=promptToken)
+        return jsonify(resData)
+    else:
+        def generate():
+            data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True)
+            time_now = int(time.time())
+            resData = make_resData_stream({}, chat=True, time_now=time_now, start=True)
+            yield 'data: {}\n'.format(json.dumps(resData))
+            for line in data.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8')
+                    resData = make_resData_stream(json.loads(decoded_line[6:]), chat=True, time_now=time_now)
+                    yield 'data: {}\n'.format(json.dumps(resData))
+        return Response(generate(), mimetype='text/event-stream')
+
+
+@app.route('/completions', methods=['POST'])
+@app.route('/v1/completions', methods=['POST'])
+def completion():
+    if (args.api_key != "" and request.headers["Authorization"].split()[1] != args.api_key):
+        return Response(status=403)
+    body = request.get_json()
+    stream = False
+    tokenize = False
+    if(is_present(body, "stream")): stream = body["stream"]
+    if(is_present(body, "tokenize")): tokenize = body["tokenize"]
+    postData = make_postData(body, chat=False, stream=stream)
+
+    promptToken = []
+    if (tokenize):
+        tokenData = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/tokenize"), data=json.dumps({"content": postData["prompt"]})).json()
+        promptToken = tokenData["tokens"]
+
+    if (not stream):
+        data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData))
+        print(data.json())
+        resData = make_resData(data.json(), chat=False, promptToken=promptToken)
+        return jsonify(resData)
+    else:
+        def generate():
+            data = requests.request("POST", urllib.parse.urljoin(args.llama_api, "/completion"), data=json.dumps(postData), stream=True)
+            time_now = int(time.time())
+            for line in data.iter_lines():
+                if line:
+                    decoded_line = line.decode('utf-8')
+                    resData = make_resData_stream(json.loads(decoded_line[6:]), chat=False, time_now=time_now)
+                    yield 'data: {}\n'.format(json.dumps(resData))
+        return Response(generate(), mimetype='text/event-stream')
+
+if __name__ == '__main__':
+    app.run(args.host, port=args.port)

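A hedged sketch of a client for the streaming path above: `generate()` frames each chunk as a `data: {...}` line, so a consumer strips that prefix and reads the chat `delta` fields (the proxy's default address is assumed):

```python
import json
import requests

# Consume the proxy's streaming chat endpoint, mirroring the
# 'data: {...}' framing produced by generate() in api_like_OAI.py.
with requests.post(
    "http://127.0.0.1:8081/v1/chat/completions",
    json={"messages": [{"role": "user", "content": "Tell me a joke."}],
          "stream": True},
    stream=True,
) as resp:
    for line in resp.iter_lines():
        if not line:
            continue
        chunk = json.loads(line.decode("utf-8")[len("data: "):])
        delta = chunk["choices"][0]["delta"]
        # The first chunk carries only {"role": "assistant"}; later ones carry text.
        if "content" in delta:
            print(delta["content"], end="", flush=True)
```
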
examples/server/completion.js.hpp | 375 (new file)

@@ -0,0 +1,375 @@
+unsigned char completion_js[] = {
+  0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x44,
+  ... /* remaining rows: the hex-encoded bytes of examples/server/completion.js
+         (paramDefaults, the async generator llama(), llamaEventTarget,
+         llamaPromise, and the deprecated llamaComplete helper) */
|
||||||
|
0x63, 0x6b, 0x28, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x29, 0x3b, 0x0a, 0x20,
|
||||||
|
0x20, 0x7d, 0x0a, 0x7d, 0x0a, 0x0a, 0x2f, 0x2f, 0x20, 0x47, 0x65, 0x74,
|
||||||
|
0x20, 0x74, 0x68, 0x65, 0x20, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x20, 0x69,
|
||||||
|
0x6e, 0x66, 0x6f, 0x20, 0x66, 0x72, 0x6f, 0x6d, 0x20, 0x74, 0x68, 0x65,
|
||||||
|
0x20, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x2e, 0x20, 0x54, 0x68, 0x69,
|
||||||
|
0x73, 0x20, 0x69, 0x73, 0x20, 0x75, 0x73, 0x65, 0x66, 0x75, 0x6c, 0x20,
|
||||||
|
0x66, 0x6f, 0x72, 0x20, 0x67, 0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x20,
|
||||||
|
0x74, 0x68, 0x65, 0x20, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x78, 0x74, 0x20,
|
||||||
|
0x77, 0x69, 0x6e, 0x64, 0x6f, 0x77, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x73,
|
||||||
|
0x6f, 0x20, 0x6f, 0x6e, 0x2e, 0x0a, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74,
|
||||||
|
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6c, 0x6c, 0x61, 0x6d, 0x61,
|
||||||
|
0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x49, 0x6e, 0x66, 0x6f, 0x20, 0x3d, 0x20,
|
||||||
|
0x61, 0x73, 0x79, 0x6e, 0x63, 0x20, 0x28, 0x29, 0x20, 0x3d, 0x3e, 0x20,
|
||||||
|
0x7b, 0x0a, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x67, 0x65, 0x6e,
|
||||||
|
0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65, 0x74, 0x74,
|
||||||
|
0x69, 0x6e, 0x67, 0x73, 0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73,
|
||||||
|
0x65, 0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x20, 0x3d, 0x20, 0x61, 0x77,
|
||||||
|
0x61, 0x69, 0x74, 0x20, 0x66, 0x65, 0x74, 0x63, 0x68, 0x28, 0x22, 0x2f,
|
||||||
|
0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2e, 0x6a, 0x73, 0x6f, 0x6e, 0x22, 0x29,
|
||||||
|
0x2e, 0x74, 0x68, 0x65, 0x6e, 0x28, 0x72, 0x20, 0x3d, 0x3e, 0x20, 0x72,
|
||||||
|
0x2e, 0x6a, 0x73, 0x6f, 0x6e, 0x28, 0x29, 0x29, 0x3b, 0x0a, 0x20, 0x20,
|
||||||
|
0x7d, 0x0a, 0x20, 0x20, 0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x67,
|
||||||
|
0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x73, 0x65,
|
||||||
|
0x74, 0x74, 0x69, 0x6e, 0x67, 0x73, 0x3b, 0x0a, 0x7d, 0x0a
|
||||||
|
};
|
||||||
|
unsigned int completion_js_len = 4462;
|
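The bytes above decode to the tail of completion.js, which exports llamaPromise for non-streaming use and posts its requests to the server. As a rough illustration of talking to the same endpoint natively, here is a minimal sketch, assuming the cpp-httplib client bundled with the server example, a server on 127.0.0.1:8080, and a POST /completion route taking the prompt/n_predict JSON fields that appear in the decoded script; none of this is taken verbatim from the diff:

// sketch.cpp - hypothetical non-streaming client for the assumed /completion route.
#include <iostream>
#include <string>
#include "httplib.h"

int main() {
    httplib::Client cli("127.0.0.1", 8080);
    const std::string body =
        R"({"prompt": "Building a website can be done in", "n_predict": 64})";
    // completion.js consumes streamed chunks; this sketch just takes the full
    // JSON reply, whose "content" field holds the generated text.
    auto res = cli.Post("/completion", body, "application/json");
    if (res && res->status == 200) {
        std::cout << res->body << std::endl;
    } else {
        std::cerr << "request failed" << std::endl;
    }
    return 0;
}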
18
examples/server/deps.sh
Executable file
@ -0,0 +1,18 @@
#!/bin/bash
# Download and update deps for binary

# get the directory of this script file
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
PUBLIC=$DIR/public

echo "download js bundle files"
curl https://npm.reversehttp.com/@preact/signals-core,@preact/signals,htm/preact,preact,preact/hooks > $PUBLIC/index.js
echo >> $PUBLIC/index.js # add newline

FILES=$(ls $PUBLIC)

for FILE in $FILES; do
  func=$(echo $FILE | tr '.' '_')
  echo "generate $FILE.hpp ($func)"
  xxd -n $func -i $PUBLIC/$FILE > $DIR/$FILE.hpp
done
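deps.sh turns every file in public/ into a C header via xxd -i, so the server binary can ship its web UI with no files on disk. A minimal sketch of how such generated headers can be served, assuming cpp-httplib (which the server example builds on); the routes and Server setup here are illustrative assumptions, not the server's actual code:

// serve_embedded.cpp - hypothetical sketch of serving the xxd-generated buffers.
#include "httplib.h"
#include "index.html.hpp"     // unsigned char index_html[]; unsigned int index_html_len;
#include "completion.js.hpp"  // unsigned char completion_js[]; unsigned int completion_js_len;

int main() {
    httplib::Server svr;

    svr.Get("/", [](const httplib::Request &, httplib::Response &res) {
        res.set_content(reinterpret_cast<const char *>(index_html),
                        index_html_len, "text/html");
    });

    svr.Get("/completion.js", [](const httplib::Request &, httplib::Response &res) {
        res.set_content(reinterpret_cast<const char *>(completion_js),
                        completion_js_len, "application/javascript");
    });

    svr.listen("127.0.0.1", 8080);
}

Regenerating a header after editing a file in public/ is just a matter of re-running deps.sh and rebuilding, which keeps the embedded assets and their sources from drifting apart.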
899
examples/server/index.html.hpp
Normal file
@ -0,0 +1,899 @@
unsigned char index_html[] = {
0x3c, 0x68, 0x74, 0x6d, 0x6c, 0x3e, 0x0a, 0x0a, 0x3c, 0x68, 0x65, 0x61,
[... xxd byte dump of examples/server/public/index.html continues past the end of this hunk; the bytes decode to the "llama.cpp - chat" page: its <style> rules, a Preact <script type="module"> importing { llama } from '/completion.js', the session/params signals, and the MessageInput, ChatLog, and ConfigForm components ...]
0x6e, 0x7d, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a,
|
||||||
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61,
|
||||||
|
0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x74, 0x65, 0x6d,
|
||||||
|
0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x3e, 0x54, 0x65,
|
||||||
|
0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x3c, 0x2f, 0x6c,
|
||||||
|
0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70,
|
||||||
|
0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e,
|
||||||
|
0x67, 0x65, 0x22, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x74, 0x65, 0x6d, 0x70,
|
||||||
|
0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x20, 0x6d, 0x69, 0x6e,
|
||||||
|
0x3d, 0x22, 0x30, 0x2e, 0x30, 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x22,
|
||||||
|
0x31, 0x2e, 0x30, 0x22, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x22, 0x30,
|
||||||
|
0x2e, 0x30, 0x31, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x74,
|
||||||
|
0x65, 0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x22, 0x20,
|
||||||
|
0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, 0x61, 0x72,
|
||||||
|
0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65,
|
||||||
|
0x6d, 0x70, 0x65, 0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x7d, 0x22, 0x20,
|
||||||
|
0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70,
|
||||||
|
0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c,
|
||||||
|
0x6f, 0x61, 0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73,
|
||||||
|
0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
|
||||||
|
0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x74, 0x65, 0x6d, 0x70, 0x65,
|
||||||
|
0x72, 0x61, 0x74, 0x75, 0x72, 0x65, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61,
|
||||||
|
0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
||||||
|
0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65,
|
||||||
|
0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d, 0x22, 0x6e, 0x50, 0x72, 0x65, 0x64,
|
||||||
|
0x69, 0x63, 0x74, 0x22, 0x3e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74,
|
||||||
|
0x69, 0x6f, 0x6e, 0x73, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e,
|
||||||
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79,
|
||||||
|
0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20, 0x69,
|
||||||
|
0x64, 0x3d, 0x22, 0x6e, 0x50, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x22,
|
||||||
|
0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, 0x31, 0x22, 0x20, 0x6d, 0x61, 0x78,
|
||||||
|
0x3d, 0x22, 0x32, 0x30, 0x34, 0x38, 0x22, 0x20, 0x73, 0x74, 0x65, 0x70,
|
||||||
|
0x3d, 0x22, 0x31, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x6e,
|
||||||
|
0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x22, 0x20, 0x76, 0x61,
|
||||||
|
0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d,
|
||||||
|
0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72,
|
||||||
|
0x65, 0x64, 0x69, 0x63, 0x74, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e,
|
||||||
|
0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65,
|
||||||
|
0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x7d,
|
||||||
|
0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e,
|
||||||
|
0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c,
|
||||||
|
0x75, 0x65, 0x2e, 0x6e, 0x5f, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74,
|
||||||
|
0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64,
|
||||||
|
0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f, 0x72, 0x3d,
|
||||||
|
0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61,
|
||||||
|
0x6c, 0x74, 0x79, 0x22, 0x3e, 0x50, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a,
|
||||||
|
0x65, 0x20, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x20, 0x73, 0x65, 0x71,
|
||||||
|
0x75, 0x65, 0x6e, 0x63, 0x65, 0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c,
|
||||||
|
0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x3c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74,
|
||||||
|
0x79, 0x70, 0x65, 0x3d, 0x22, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20,
|
||||||
|
0x69, 0x64, 0x3d, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70,
|
||||||
|
0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79, 0x22, 0x20, 0x6d, 0x69, 0x6e, 0x3d,
|
||||||
|
0x22, 0x30, 0x2e, 0x30, 0x22, 0x20, 0x6d, 0x61, 0x78, 0x3d, 0x22, 0x32,
|
||||||
|
0x2e, 0x30, 0x22, 0x20, 0x73, 0x74, 0x65, 0x70, 0x3d, 0x22, 0x30, 0x2e,
|
||||||
|
0x30, 0x31, 0x22, 0x20, 0x6e, 0x61, 0x6d, 0x65, 0x3d, 0x22, 0x72, 0x65,
|
||||||
|
0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x74, 0x79,
|
||||||
|
0x22, 0x20, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70,
|
||||||
|
0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
|
||||||
|
0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c,
|
||||||
|
0x74, 0x79, 0x7d, 0x22, 0x20, 0x6f, 0x6e, 0x69, 0x6e, 0x70, 0x75, 0x74,
|
||||||
|
0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x50, 0x61, 0x72,
|
||||||
|
0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x7d, 0x20, 0x2f, 0x3e,
|
||||||
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x24, 0x7b, 0x70,
|
||||||
|
0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
|
||||||
|
0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x70, 0x65, 0x6e, 0x61, 0x6c,
|
||||||
|
0x74, 0x79, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
||||||
|
0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x3e,
|
||||||
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x3c, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x20, 0x66, 0x6f,
|
||||||
|
0x72, 0x3d, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x6c, 0x61,
|
||||||
|
0x73, 0x74, 0x5f, 0x6e, 0x22, 0x3e, 0x43, 0x6f, 0x6e, 0x73, 0x69, 0x64,
|
||||||
|
0x65, 0x72, 0x20, 0x4e, 0x20, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x20,
|
||||||
|
0x66, 0x6f, 0x72, 0x20, 0x70, 0x65, 0x6e, 0x61, 0x6c, 0x69, 0x7a, 0x65,
|
||||||
|
0x3c, 0x2f, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
||||||
|
0x69, 0x6e, 0x70, 0x75, 0x74, 0x20, 0x74, 0x79, 0x70, 0x65, 0x3d, 0x22,
|
||||||
|
0x72, 0x61, 0x6e, 0x67, 0x65, 0x22, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x72,
|
||||||
|
0x65, 0x70, 0x65, 0x61, 0x74, 0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e,
|
||||||
|
0x22, 0x20, 0x6d, 0x69, 0x6e, 0x3d, 0x22, 0x30, 0x2e, 0x30, 0x22, 0x20,
|
||||||
|
0x6d, 0x61, 0x78, 0x3d, 0x22, 0x32, 0x30, 0x34, 0x38, 0x22, 0x20, 0x6e,
|
||||||
|
0x61, 0x6d, 0x65, 0x3d, 0x22, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f,
|
||||||
|
0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x22, 0x20, 0x76, 0x61, 0x6c, 0x75,
|
||||||
|
0x65, 0x3d, 0x22, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e,
|
||||||
|
0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74,
|
||||||
|
0x5f, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x7d, 0x22, 0x20, 0x6f, 0x6e,
|
||||||
|
0x69, 0x6e, 0x70, 0x75, 0x74, 0x3d, 0x24, 0x7b, 0x75, 0x70, 0x64, 0x61,
|
||||||
|
0x74, 0x65, 0x50, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x46, 0x6c, 0x6f, 0x61,
|
||||||
|
0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61,
|
||||||
|
0x6e, 0x3e, 0x24, 0x7b, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x76,
|
||||||
|
0x61, 0x6c, 0x75, 0x65, 0x2e, 0x72, 0x65, 0x70, 0x65, 0x61, 0x74, 0x5f,
|
||||||
|
0x6c, 0x61, 0x73, 0x74, 0x5f, 0x6e, 0x7d, 0x3c, 0x2f, 0x73, 0x70, 0x61,
|
||||||
|
0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a, 0x0a, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66,
|
||||||
|
0x69, 0x65, 0x6c, 0x64, 0x73, 0x65, 0x74, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x66, 0x6f, 0x72, 0x6d, 0x3e,
|
||||||
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x2f, 0x2f, 0x20, 0x70, 0x6f,
|
||||||
|
0x6f, 0x72, 0x20, 0x6d, 0x61, 0x6e, 0x73, 0x20, 0x6d, 0x61, 0x72, 0x6b,
|
||||||
|
0x64, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65,
|
||||||
|
0x6d, 0x65, 0x6e, 0x74, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e,
|
||||||
|
0x73, 0x74, 0x20, 0x4d, 0x61, 0x72, 0x6b, 0x64, 0x6f, 0x77, 0x6e, 0x69,
|
||||||
|
0x73, 0x68, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x73,
|
||||||
|
0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x6d, 0x64, 0x20, 0x3d, 0x20,
|
||||||
|
0x70, 0x61, 0x72, 0x61, 0x6d, 0x73, 0x2e, 0x74, 0x65, 0x78, 0x74, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70,
|
||||||
|
0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5e, 0x23, 0x7b, 0x31, 0x2c, 0x36,
|
||||||
|
0x7d, 0x20, 0x28, 0x2e, 0x2a, 0x29, 0x24, 0x2f, 0x67, 0x69, 0x6d, 0x2c,
|
||||||
|
0x20, 0x27, 0x3c, 0x68, 0x33, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x68, 0x33,
|
||||||
|
0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x2a,
|
||||||
|
0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, 0x2a, 0x5c, 0x2a, 0x2f,
|
||||||
|
0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e,
|
||||||
|
0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27,
|
||||||
|
0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72,
|
||||||
|
0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5f, 0x5f, 0x28, 0x2e,
|
||||||
|
0x2a, 0x3f, 0x29, 0x5f, 0x5f, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x73,
|
||||||
|
0x74, 0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x73, 0x74,
|
||||||
|
0x72, 0x6f, 0x6e, 0x67, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65,
|
||||||
|
0x28, 0x2f, 0x5c, 0x2a, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5c, 0x2a, 0x2f,
|
||||||
|
0x67, 0x2c, 0x20, 0x27, 0x3c, 0x65, 0x6d, 0x3e, 0x24, 0x31, 0x3c, 0x2f,
|
||||||
|
0x65, 0x6d, 0x3e, 0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f,
|
||||||
|
0x5f, 0x28, 0x2e, 0x2a, 0x3f, 0x29, 0x5f, 0x2f, 0x67, 0x2c, 0x20, 0x27,
|
||||||
|
0x3c, 0x65, 0x6d, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x65, 0x6d, 0x3e, 0x27,
|
||||||
|
0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e, 0x72,
|
||||||
|
0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x60, 0x60, 0x60, 0x2e,
|
||||||
|
0x2a, 0x3f, 0x5c, 0x6e, 0x28, 0x5b, 0x5c, 0x73, 0x5c, 0x53, 0x5d, 0x2a,
|
||||||
|
0x3f, 0x29, 0x60, 0x60, 0x60, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x70,
|
||||||
|
0x72, 0x65, 0x3e, 0x3c, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c,
|
||||||
|
0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e, 0x3c, 0x2f, 0x70, 0x72, 0x65, 0x3e,
|
||||||
|
0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e,
|
||||||
|
0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x60, 0x28, 0x2e,
|
||||||
|
0x2a, 0x3f, 0x29, 0x60, 0x2f, 0x67, 0x2c, 0x20, 0x27, 0x3c, 0x63, 0x6f,
|
||||||
|
0x64, 0x65, 0x3e, 0x24, 0x31, 0x3c, 0x2f, 0x63, 0x6f, 0x64, 0x65, 0x3e,
|
||||||
|
0x27, 0x29, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x2e,
|
||||||
|
0x72, 0x65, 0x70, 0x6c, 0x61, 0x63, 0x65, 0x28, 0x2f, 0x5c, 0x6e, 0x2f,
|
||||||
|
0x67, 0x69, 0x6d, 0x2c, 0x20, 0x27, 0x3c, 0x62, 0x72, 0x20, 0x2f, 0x3e,
|
||||||
|
0x27, 0x29, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65,
|
||||||
|
0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x3c, 0x73,
|
||||||
|
0x70, 0x61, 0x6e, 0x20, 0x64, 0x61, 0x6e, 0x67, 0x65, 0x72, 0x6f, 0x75,
|
||||||
|
0x73, 0x6c, 0x79, 0x53, 0x65, 0x74, 0x49, 0x6e, 0x6e, 0x65, 0x72, 0x48,
|
||||||
|
0x54, 0x4d, 0x4c, 0x3d, 0x24, 0x7b, 0x7b, 0x20, 0x5f, 0x5f, 0x68, 0x74,
|
||||||
|
0x6d, 0x6c, 0x3a, 0x20, 0x6d, 0x64, 0x20, 0x7d, 0x7d, 0x20, 0x2f, 0x3e,
|
||||||
|
0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x3b, 0x0a, 0x0a, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x20, 0x4d, 0x6f, 0x64,
|
||||||
|
0x65, 0x6c, 0x47, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x49, 0x6e, 0x66, 0x6f, 0x20, 0x3d, 0x20, 0x28, 0x70, 0x61, 0x72, 0x61,
|
||||||
|
0x6d, 0x73, 0x29, 0x20, 0x3d, 0x3e, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x69, 0x66, 0x20, 0x28, 0x21, 0x6c, 0x6c, 0x61, 0x6d,
|
||||||
|
0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||||
|
0x29, 0x20, 0x7b, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x72, 0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60,
|
||||||
|
0x3c, 0x73, 0x70, 0x61, 0x6e, 0x2f, 0x3e, 0x60, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x7d, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72,
|
||||||
|
0x65, 0x74, 0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x73, 0x70, 0x61,
|
||||||
|
0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x53, 0x74, 0x61, 0x74,
|
||||||
|
0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x70, 0x72, 0x65, 0x64,
|
||||||
|
0x69, 0x63, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x6f,
|
||||||
|
0x6b, 0x65, 0x6e, 0x5f, 0x6d, 0x73, 0x2e, 0x74, 0x6f, 0x46, 0x69, 0x78,
|
||||||
|
0x65, 0x64, 0x28, 0x29, 0x7d, 0x6d, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20,
|
||||||
|
0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x2c, 0x20, 0x24, 0x7b, 0x6c, 0x6c, 0x61,
|
||||||
|
0x6d, 0x61, 0x53, 0x74, 0x61, 0x74, 0x73, 0x2e, 0x76, 0x61, 0x6c, 0x75,
|
||||||
|
0x65, 0x2e, 0x70, 0x72, 0x65, 0x64, 0x69, 0x63, 0x74, 0x65, 0x64, 0x5f,
|
||||||
|
0x70, 0x65, 0x72, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x2e, 0x74,
|
||||||
|
0x6f, 0x46, 0x69, 0x78, 0x65, 0x64, 0x28, 0x32, 0x29, 0x7d, 0x20, 0x74,
|
||||||
|
0x6f, 0x6b, 0x65, 0x6e, 0x73, 0x20, 0x70, 0x65, 0x72, 0x20, 0x73, 0x65,
|
||||||
|
0x63, 0x6f, 0x6e, 0x64, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x3c, 0x2f, 0x73, 0x70, 0x61, 0x6e, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x60, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x7d, 0x0a, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x66, 0x75, 0x6e, 0x63, 0x74, 0x69, 0x6f, 0x6e,
|
||||||
|
0x20, 0x41, 0x70, 0x70, 0x28, 0x70, 0x72, 0x6f, 0x70, 0x73, 0x29, 0x20,
|
||||||
|
0x7b, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x74,
|
||||||
|
0x75, 0x72, 0x6e, 0x20, 0x68, 0x74, 0x6d, 0x6c, 0x60, 0x0a, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x64, 0x69, 0x76, 0x20, 0x69,
|
||||||
|
0x64, 0x3d, 0x22, 0x63, 0x6f, 0x6e, 0x74, 0x61, 0x69, 0x6e, 0x65, 0x72,
|
||||||
|
0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x3c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x68,
|
||||||
|
0x31, 0x3e, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c,
|
||||||
|
0x2f, 0x68, 0x31, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x3c, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x3e,
|
||||||
|
0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x3c, 0x6d, 0x61, 0x69, 0x6e, 0x20, 0x69, 0x64, 0x3d, 0x22, 0x63, 0x6f,
|
||||||
|
0x6e, 0x74, 0x65, 0x6e, 0x74, 0x22, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24, 0x7b, 0x63,
|
||||||
|
0x68, 0x61, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x2e, 0x76,
|
||||||
|
0x61, 0x6c, 0x75, 0x65, 0x20, 0x3f, 0x20, 0x43, 0x68, 0x61, 0x74, 0x4c,
|
||||||
|
0x6f, 0x67, 0x20, 0x3a, 0x20, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x46,
|
||||||
|
0x6f, 0x72, 0x6d, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x6d, 0x61, 0x69, 0x6e,
|
||||||
|
0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x3c, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x20, 0x69, 0x64,
|
||||||
|
0x3d, 0x22, 0x77, 0x72, 0x69, 0x74, 0x65, 0x22, 0x3e, 0x0a, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x24,
|
||||||
|
0x7b, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x49, 0x6e, 0x70, 0x75,
|
||||||
|
0x74, 0x7d, 0x20, 0x2f, 0x3e, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x65, 0x63, 0x74, 0x69, 0x6f,
|
||||||
|
0x6e, 0x3e, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x3c, 0x66, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
||||||
|
0x70, 0x3e, 0x3c, 0x24, 0x7b, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x47, 0x65,
|
||||||
|
0x6e, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f,
|
||||||
|
0x7d, 0x20, 0x2f, 0x3e, 0x3c, 0x2f, 0x70, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x70, 0x3e,
|
||||||
|
0x50, 0x6f, 0x77, 0x65, 0x72, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x3c,
|
||||||
|
0x61, 0x20, 0x68, 0x72, 0x65, 0x66, 0x3d, 0x22, 0x68, 0x74, 0x74, 0x70,
|
||||||
|
0x73, 0x3a, 0x2f, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63,
|
||||||
|
0x6f, 0x6d, 0x2f, 0x67, 0x67, 0x65, 0x72, 0x67, 0x61, 0x6e, 0x6f, 0x76,
|
||||||
|
0x2f, 0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x22, 0x3e,
|
||||||
|
0x6c, 0x6c, 0x61, 0x6d, 0x61, 0x2e, 0x63, 0x70, 0x70, 0x3c, 0x2f, 0x61,
|
||||||
|
0x3e, 0x20, 0x61, 0x6e, 0x64, 0x20, 0x3c, 0x61, 0x20, 0x68, 0x72, 0x65,
|
||||||
|
0x66, 0x3d, 0x22, 0x68, 0x74, 0x74, 0x70, 0x73, 0x3a, 0x2f, 0x2f, 0x67,
|
||||||
|
0x67, 0x6d, 0x6c, 0x2e, 0x61, 0x69, 0x22, 0x3e, 0x67, 0x67, 0x6d, 0x6c,
|
||||||
|
0x2e, 0x61, 0x69, 0x3c, 0x2f, 0x61, 0x3e, 0x2e, 0x3c, 0x2f, 0x70, 0x3e,
|
||||||
|
0x0a, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x3c,
|
||||||
|
0x2f, 0x66, 0x6f, 0x6f, 0x74, 0x65, 0x72, 0x3e, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x3c, 0x2f, 0x64, 0x69, 0x76, 0x3e, 0x0a,
|
||||||
|
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x60, 0x3b, 0x0a, 0x20, 0x20, 0x20,
|
||||||
|
0x20, 0x7d, 0x0a, 0x0a, 0x20, 0x20, 0x20, 0x20, 0x72, 0x65, 0x6e, 0x64,
|
||||||
|
0x65, 0x72, 0x28, 0x68, 0x28, 0x41, 0x70, 0x70, 0x29, 0x2c, 0x20, 0x64,
|
||||||
|
0x6f, 0x63, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x62, 0x6f, 0x64, 0x79,
|
||||||
|
0x29, 0x3b, 0x0a, 0x20, 0x20, 0x3c, 0x2f, 0x73, 0x63, 0x72, 0x69, 0x70,
|
||||||
|
0x74, 0x3e, 0x0a, 0x3c, 0x2f, 0x68, 0x65, 0x61, 0x64, 0x3e, 0x0a, 0x0a,
|
||||||
|
0x3c, 0x62, 0x6f, 0x64, 0x79, 0x3e, 0x0a, 0x3c, 0x2f, 0x62, 0x6f, 0x64,
|
||||||
|
0x79, 0x3e, 0x0a, 0x0a, 0x3c, 0x2f, 0x68, 0x74, 0x6d, 0x6c, 0x3e, 0x0a
|
||||||
|
};
|
||||||
|
unsigned int index_html_len = 10752;
1851	examples/server/index.js.hpp (new file; diff suppressed because it is too large)

168	examples/server/public/completion.js (new file)
@@ -0,0 +1,168 @@
const paramDefaults = {
  stream: true,
  n_predict: 500,
  temperature: 0.2,
  stop: ["</s>"]
};

let generation_settings = null;


// Completes the prompt as a generator. Recommended for most use cases.
//
// Example:
//
//    import { llama } from '/completion.js'
//
//    const request = llama("Tell me a joke", {n_predict: 800})
//    for await (const chunk of request) {
//      document.write(chunk.data.content)
//    }
//
export async function* llama(prompt, params = {}, config = {}) {
  let controller = config.controller;

  if (!controller) {
    controller = new AbortController();
  }

  const completionParams = { ...paramDefaults, ...params, prompt };

  const response = await fetch("/completion", {
    method: 'POST',
    body: JSON.stringify(completionParams),
    headers: {
      'Connection': 'keep-alive',
      'Content-Type': 'application/json',
      'Accept': 'text/event-stream'
    },
    signal: controller.signal,
  });

  const reader = response.body.getReader();
  const decoder = new TextDecoder();

  let content = "";

  try {
    let cont = true;

    while (cont) {
      const result = await reader.read();
      if (result.done) {
        break;
      }

      // SSE answers come as multiple lines of the form `key: value\n`, with data always present as a key.
      // we mainly care about the data: key here, which we expect as json
      const text = decoder.decode(result.value);

      // parse all sse events and add them to result
      const regex = /^(\S+):\s(.*)$/gm;
      for (const match of text.matchAll(regex)) {
        result[match[1]] = match[2]
      }

      // since we know this is llama.cpp, let's just decode the json in data
      result.data = JSON.parse(result.data);
      content += result.data.content;

      // yield
      yield result;

      // if we got a stop token from server, we will break here
      if (result.data.stop) {
        if (result.data.generation_settings) {
          generation_settings = result.data.generation_settings;
        }
        break;
      }
    }
  } catch (e) {
    if (e.name !== 'AbortError') {
      console.error("llama error: ", e);
    }
    throw e;
  }
  finally {
    controller.abort();
  }

  return content;
}

// Call llama, return an event target that you can subscribe to
//
// Example:
//
//    import { llamaEventTarget } from '/completion.js'
//
//    const conn = llamaEventTarget(prompt)
//    conn.addEventListener("message", (chunk) => {
//      document.write(chunk.detail.content)
//    })
//
export const llamaEventTarget = (prompt, params = {}, config = {}) => {
  const eventTarget = new EventTarget();
  (async () => {
    let content = "";
    for await (const chunk of llama(prompt, params, config)) {
      if (chunk.data) {
        content += chunk.data.content;
        eventTarget.dispatchEvent(new CustomEvent("message", { detail: chunk.data }));
      }
      if (chunk.data.generation_settings) {
        eventTarget.dispatchEvent(new CustomEvent("generation_settings", { detail: chunk.data.generation_settings }));
      }
      if (chunk.data.timings) {
        eventTarget.dispatchEvent(new CustomEvent("timings", { detail: chunk.data.timings }));
      }
    }
    eventTarget.dispatchEvent(new CustomEvent("done", { detail: { content } }));
  })();
  return eventTarget;
}

// Call llama, return a promise that resolves to the completed text. This does not support streaming
//
// Example:
//
//     llamaPromise(prompt).then((content) => {
//       document.write(content)
//     })
//
//     or
//
//     const content = await llamaPromise(prompt)
//     document.write(content)
//
export const llamaPromise = (prompt, params = {}, config = {}) => {
  return new Promise(async (resolve, reject) => {
    let content = "";
    try {
      for await (const chunk of llama(prompt, params, config)) {
        content += chunk.data.content;
      }
      resolve(content);
    } catch (error) {
      reject(error);
    }
  });
};

/**
 * (deprecated)
 */
export const llamaComplete = async (params, controller, callback) => {
  for await (const chunk of llama(params.prompt, params, { controller })) {
    callback(chunk);
  }
}

// Get the model info from the server. This is useful for getting the context window and so on.
export const llamaModelInfo = async () => {
  if (!generation_settings) {
    generation_settings = await fetch("/model.json").then(r => r.json());
  }
  return generation_settings;
}

380	examples/server/public/index.html (new file)
@@ -0,0 +1,380 @@
<html>

<head>
  <meta charset="UTF-8">
  <meta name="viewport" content="width=device-width, initial-scale=1, maximum-scale=1" />
  <title>llama.cpp - chat</title>

  <style>
    body {
      background-color: #fff;
      color: #000;
      font-family: system-ui;
      font-size: 90%;
    }

    #container {
      margin: 0em auto;
      display: flex;
      flex-direction: column;
      justify-content: space-between;
      height: 100%;
    }

    main {
      margin: 3px;
      display: flex;
      flex-direction: column;
      justify-content: space-between;
      gap: 1em;

      flex-grow: 1;
      overflow-y: auto;

      border: 1px solid #ccc;
      border-radius: 5px;
      padding: 0.5em;
    }

    body {
      max-width: 600px;
      min-width: 300px;
      line-height: 1.2;
      margin: 0 auto;
      padding: 0 0.5em;
    }

    p {
      overflow-wrap: break-word;
      word-wrap: break-word;
      hyphens: auto;
      margin-top: 0.5em;
      margin-bottom: 0.5em;
    }

    #write form {
      margin: 1em 0 0 0;
      display: flex;
      flex-direction: column;
      gap: 0.5em;
      align-items: stretch;
    }

    .right {
      display: flex;
      flex-direction: row;
      gap: 0.5em;
      justify-content: flex-end;
    }

    fieldset {
      border: none;
      padding: 0;
      margin: 0;
    }

    textarea {
      padding: 5px;
      flex-grow: 1;
      width: 100%;
    }

    pre code {
      display: block;
      background-color: #222;
      color: #ddd;
    }
    code {
      font-family: monospace;
      padding: 0.1em 0.3em;
      border-radius: 3px;
    }

    fieldset label {
      margin: 0.5em 0;
      display: block;
    }

    header, footer {
      text-align: center;
    }

    footer {
      font-size: 80%;
      color: #888;
    }
  </style>

  <script type="module">
    import {
      html, h, signal, effect, computed, render, useSignal, useEffect, useRef
    } from '/index.js';

    import { llama } from '/completion.js';

    const session = signal({
      prompt: "This is a conversation between user and llama, a friendly chatbot. respond in simple markdown.",
      template: "{{prompt}}\n\n{{history}}\n{{char}}:",
      historyTemplate: "{{name}}: {{message}}",
      transcript: [],
      type: "chat",
      char: "llama",
      user: "User",
    })

    const params = signal({
      n_predict: 400,
      temperature: 0.7,
      repeat_last_n: 256,
      repeat_penalty: 1.18,
      top_k: 40,
      top_p: 0.5,
    })

    const llamaStats = signal(null)
    const controller = signal(null)

    const generating = computed(() => controller.value == null )
    const chatStarted = computed(() => session.value.transcript.length > 0)

    const transcriptUpdate = (transcript) => {
      session.value = {
        ...session.value,
        transcript
      }
    }

    // simple template replace
    const template = (str, extraSettings) => {
      let settings = session.value;
      if (extraSettings) {
        settings = { ...settings, ...extraSettings };
      }
      return String(str).replaceAll(/\{\{(.*?)\}\}/g, (_, key) => template(settings[key]));
    }

    // send message to server
    const chat = async (msg) => {
      if (controller.value) {
        console.log('already running...');
        return;
      }
      controller.value = new AbortController();

      transcriptUpdate([...session.value.transcript, ["{{user}}", msg]])

      const prompt = template(session.value.template, {
        message: msg,
        history: session.value.transcript.flatMap(([name, message]) => template(session.value.historyTemplate, {name, message})).join("\n"),
      });

      let currentMessage = '';
      const history = session.value.transcript

      const llamaParams = {
        ...params.value,
        stop: ["</s>", template("{{char}}:"), template("{{user}}:")],
      }

      for await (const chunk of llama(prompt, llamaParams, { controller: controller.value })) {
        const data = chunk.data;
        currentMessage += data.content;

        // remove leading whitespace
        currentMessage = currentMessage.replace(/^\s+/, "")

        transcriptUpdate([...history, ["{{char}}", currentMessage]])

        if (data.stop) {
          console.log("Completion finished: '", currentMessage, "', summary: ", data);
        }

        if (data.timings) {
          llamaStats.value = data.timings;
        }
      }

      controller.value = null;
    }

    function MessageInput() {
      const message = useSignal("")

      const stop = (e) => {
        e.preventDefault();
        if (controller.value) {
          controller.value.abort();
          controller.value = null;
        }
      }

      const reset = (e) => {
        stop(e);
        transcriptUpdate([]);
      }

      const submit = (e) => {
        stop(e);
        chat(message.value);
        message.value = "";
      }

      const enterSubmits = (event) => {
        if (event.which === 13 && !event.shiftKey) {
          submit(event);
        }
      }

      return html`
        <form onsubmit=${submit}>
          <div>
            <textarea type="text" rows=2 onkeypress=${enterSubmits} value="${message}" oninput=${(e) => message.value = e.target.value} placeholder="Say something..."/>
          </div>
          <div class="right">
            <button type="submit" disabled=${!generating.value} >Send</button>
            <button onclick=${stop} disabled=${generating}>Stop</button>
            <button onclick=${reset}>Reset</button>
          </div>
        </form>
      `
    }

    const ChatLog = (props) => {
      const messages = session.value.transcript;
      const container = useRef(null)

      useEffect(() => {
        // scroll to bottom (if needed)
        if (container.current && container.current.scrollHeight <= container.current.scrollTop + container.current.offsetHeight + 300) {
          container.current.scrollTo(0, container.current.scrollHeight)
        }
      }, [messages])

      const chatLine = ([user, msg]) => {
        return html`<p key=${msg}><strong>${template(user)}:</strong> <${Markdownish} text=${template(msg)} /></p>`
      };

      return html`
        <section id="chat" ref=${container}>
          ${messages.flatMap(chatLine)}
        </section>`;
    };

    const ConfigForm = (props) => {
      const updateSession = (el) => session.value = { ...session.value, [el.target.name]: el.target.value }
      const updateParams = (el) => params.value = { ...params.value, [el.target.name]: el.target.value }
      const updateParamsFloat = (el) => params.value = { ...params.value, [el.target.name]: parseFloat(el.target.value) }

      return html`
        <form>
          <fieldset>
            <div>
              <label for="prompt">Prompt</label>
              <textarea type="text" name="prompt" value="${session.value.prompt}" rows=4 oninput=${updateSession}/>
            </div>

            <div>
              <label for="user">User name</label>
              <input type="text" name="user" value="${session.value.user}" oninput=${updateSession} />
            </div>

            <div>
              <label for="bot">Bot name</label>
              <input type="text" name="char" value="${session.value.char}" oninput=${updateSession} />
            </div>

            <div>
              <label for="template">Prompt template</label>
              <textarea id="template" name="template" value="${session.value.template}" rows=4 oninput=${updateSession}/>
            </div>

            <div>
              <label for="template">Chat history template</label>
              <textarea id="template" name="historyTemplate" value="${session.value.historyTemplate}" rows=1 oninput=${updateSession}/>
            </div>

            <div>
              <label for="temperature">Temperature</label>
              <input type="range" id="temperature" min="0.0" max="1.0" step="0.01" name="temperature" value="${params.value.temperature}" oninput=${updateParamsFloat} />
              <span>${params.value.temperature}</span>
            </div>

            <div>
              <label for="nPredict">Predictions</label>
              <input type="range" id="nPredict" min="1" max="2048" step="1" name="n_predict" value="${params.value.n_predict}" oninput=${updateParamsFloat} />
              <span>${params.value.n_predict}</span>
            </div>

            <div>
              <label for="repeat_penalty">Penalize repeat sequence</label>
              <input type="range" id="repeat_penalty" min="0.0" max="2.0" step="0.01" name="repeat_penalty" value="${params.value.repeat_penalty}" oninput=${updateParamsFloat} />
              <span>${params.value.repeat_penalty}</span>
            </div>

            <div>
              <label for="repeat_last_n">Consider N tokens for penalize</label>
              <input type="range" id="repeat_last_n" min="0.0" max="2048" name="repeat_last_n" value="${params.value.repeat_last_n}" oninput=${updateParamsFloat} />
              <span>${params.value.repeat_last_n}</span>
            </div>

          </fieldset>
        </form>
      `
    }

    // poor man's markdown replacement
    const Markdownish = (params) => {
      const md = params.text
        .replace(/^#{1,6} (.*)$/gim, '<h3>$1</h3>')
        .replace(/\*\*(.*?)\*\*/g, '<strong>$1</strong>')
        .replace(/__(.*?)__/g, '<strong>$1</strong>')
        .replace(/\*(.*?)\*/g, '<em>$1</em>')
        .replace(/_(.*?)_/g, '<em>$1</em>')
        .replace(/```.*?\n([\s\S]*?)```/g, '<pre><code>$1</code></pre>')
        .replace(/`(.*?)`/g, '<code>$1</code>')
        .replace(/\n/gim, '<br />');
      return html`<span dangerouslySetInnerHTML=${{ __html: md }} />`;
    };

    const ModelGenerationInfo = (params) => {
      if (!llamaStats.value) {
        return html`<span/>`
      }
      return html`
        <span>
          ${llamaStats.value.predicted_per_token_ms.toFixed()}ms per token, ${llamaStats.value.predicted_per_second.toFixed(2)} tokens per second
        </span>
      `
    }

    function App(props) {

      return html`
        <div id="container">
          <header>
            <h1>llama.cpp</h1>
          </header>

          <main id="content">
            <${chatStarted.value ? ChatLog : ConfigForm} />
          </main>

          <section id="write">
            <${MessageInput} />
          </section>

          <footer>
            <p><${ModelGenerationInfo} /></p>
            <p>Powered by <a href="https://github.com/ggerganov/llama.cpp">llama.cpp</a> and <a href="https://ggml.ai">ggml.ai</a>.</p>
          </footer>
        </div>
      `;
    }

    render(h(App), document.body);
  </script>
</head>

<body>
</body>

</html>

1	examples/server/public/index.js (new file; diff suppressed because one or more lines are too long)
10	expose.cpp
@@ -47,14 +47,14 @@ extern "C"
     }

     //first digit is whether configured, second is platform, third is devices
-    int parseinfo = inputs.clblast_info;
+    int cl_parseinfo = inputs.clblast_info;

-    std::string usingclblast = "GGML_OPENCL_CONFIGURED="+std::to_string(parseinfo>0?1:0);
+    std::string usingclblast = "GGML_OPENCL_CONFIGURED="+std::to_string(cl_parseinfo>0?1:0);
     putenv((char*)usingclblast.c_str());

-    parseinfo = parseinfo%100; //keep last 2 digits
+    cl_parseinfo = cl_parseinfo%100; //keep last 2 digits
-    int platform = parseinfo/10;
+    int platform = cl_parseinfo/10;
-    int devices = parseinfo%10;
+    int devices = cl_parseinfo%10;
     platformenv = "GGML_OPENCL_PLATFORM="+std::to_string(platform);
     deviceenv = "GGML_OPENCL_DEVICE="+std::to_string(devices);
     putenv((char*)platformenv.c_str());

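The digit packing that this hunk renames is compact but easy to misread. As a note for reviewers, here is a minimal host-side C++ sketch of the same decode; the function name and output format are illustrative, not part of the commit:

#include <cstdio>

// Sketch: decodes the "configured / platform / device" digit packing that
// expose.cpp applies to inputs.clblast_info, e.g. 123 -> configured,
// OpenCL platform 2, device 3.
static void decode_clblast_info(int info) {
    const bool configured = info > 0; // any nonzero value means configured
    info = info % 100;                // keep last 2 digits
    const int platform = info / 10;   // tens digit selects the platform
    const int device   = info % 10;   // ones digit selects the device
    std::printf("configured=%d platform=%d device=%d\n", configured, platform, device);
}

int main() {
    decode_clblast_info(123); // prints: configured=1 platform=2 device=3
    return 0;
}
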
17	expose.h
@@ -1,6 +1,19 @@
 #pragma once

 const int stop_token_max = 10;
+const int ban_token_max = 10;
+// match kobold's sampler list and order
+enum samplers
+{
+    KCPP_SAMPLER_TOP_K=0,
+    KCPP_SAMPLER_TOP_A=1,
+    KCPP_SAMPLER_TOP_P=2,
+    KCPP_SAMPLER_TFS=3,
+    KCPP_SAMPLER_TYP=4,
+    KCPP_SAMPLER_TEMP=5,
+    KCPP_SAMPLER_REP_PEN=6,
+    KCPP_SAMPLER_MAX
+};
 struct load_model_inputs
 {
     const int threads;
@@ -18,10 +31,12 @@ struct load_model_inputs
     const bool use_smartcontext;
     const bool unban_tokens;
     const int clblast_info = 0;
+    const int cublas_info = 0;
     const int blasbatchsize = 512;
     const int debugmode = 0;
     const int forceversion = 0;
     const int gpulayers = 0;
+    const char * banned_tokens[ban_token_max];
 };
 struct generation_inputs
 {
@@ -40,6 +55,8 @@ struct generation_inputs
     const int mirostat = 0;
     const float mirostat_eta;
     const float mirostat_tau;
+    const samplers sampler_order[KCPP_SAMPLER_MAX];
+    const int sampler_len;
     const char * stop_sequence[stop_token_max];
     const bool stream_sse;
 };

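The new sampler fields give callers explicit control over sampler ordering. As a usage illustration only (the real order is built at runtime by the client; the full-order array below is an assumption), a caller might fill `sampler_order` and `sampler_len` like this:

// Hypothetical sketch, not from this commit: pass every sampler once, in
// kobold's declared order, and set sampler_len to the number of entries.
const samplers default_sampler_order[KCPP_SAMPLER_MAX] = {
    KCPP_SAMPLER_TOP_K, KCPP_SAMPLER_TOP_A, KCPP_SAMPLER_TOP_P,
    KCPP_SAMPLER_TFS, KCPP_SAMPLER_TYP, KCPP_SAMPLER_TEMP,
    KCPP_SAMPLER_REP_PEN,
};
const int default_sampler_len = KCPP_SAMPLER_MAX;
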
379	ggml-cuda.cu
@@ -70,9 +70,11 @@ typedef void (*ggml_cuda_op_t)(

 // QK = number of values after dequantization
 // QR = QK / number of values before dequantization
+// QI = number of 32 bit integers before dequantization

 #define QK4_0 32
 #define QR4_0 2
+#define QI4_0 4
 typedef struct {
     half d; // delta
     uint8_t qs[QK4_0 / 2]; // nibbles / quants
@@ -81,6 +83,7 @@ static_assert(sizeof(block_q4_0) == sizeof(ggml_fp16_t) + QK4_0 / 2, "wrong q4_0
 #define QK4_1 32
 #define QR4_1 2
+#define QI4_1 4
 typedef struct {
     half d; // delta
     half m; // min
@@ -90,6 +93,7 @@ static_assert(sizeof(block_q4_1) == sizeof(ggml_fp16_t) * 2 + QK4_1 / 2, "wrong
 #define QK5_0 32
 #define QR5_0 2
+#define QI5_0 4
 typedef struct {
     half d; // delta
     uint8_t qh[4]; // 5-th bit of quants
@@ -99,6 +103,7 @@ static_assert(sizeof(block_q5_0) == sizeof(ggml_fp16_t) + sizeof(uint32_t) + QK5
 #define QK5_1 32
 #define QR5_1 2
+#define QI5_1 4
 typedef struct {
     half d; // delta
     half m; // min
@@ -109,12 +114,25 @@ static_assert(sizeof(block_q5_1) == 2 * sizeof(ggml_fp16_t) + sizeof(uint32_t) +
 #define QK8_0 32
 #define QR8_0 1
+#define QI8_0 8
 typedef struct {
     half d; // delta
     int8_t qs[QK8_0]; // quants
 } block_q8_0;
 static_assert(sizeof(block_q8_0) == sizeof(ggml_fp16_t) + QK8_0, "wrong q8_0 block size/padding");

+#define QK8_1 32
+#define QR8_1 1
+#define QI8_1 8
+typedef struct {
+    half d; // delta
+    half s; // unquantized sum
+    int8_t qs[QK8_0]; // quants
+} block_q8_1;
+static_assert(sizeof(block_q8_1) == 2*sizeof(ggml_fp16_t) + QK8_0, "wrong q8_1 block size/padding");
+
+typedef float (*vec_dot_q_cuda_t)(const void * vbq, const block_q8_1 * bq8_1, const int iqs);
+
 //================================= k-quants

 #ifdef GGML_QKK_64
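To make the new QI constants concrete: QI counts the 32-bit words of quantized data per block, so QI = QK / (QR * 4), since each block stores QK/QR bytes of quants (the 5-bit types keep their high bits separately in qh). A quick sanity check, as a standalone sketch rather than code from the diff:

#include <cassert>

// QI = QK / (QR * sizeof(int)): 32-bit words of qs data per block.
constexpr int qi(int qk, int qr) { return qk / (qr * 4); }

int main() {
    assert(qi(32, 2) == 4); // q4_0/q4_1/q5_0/q5_1: 16 bytes of nibbles -> 4 ints
    assert(qi(32, 1) == 8); // q8_0/q8_1: 32 int8 quants -> 8 ints
    return 0;
}
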
@@ -198,14 +216,15 @@ static_assert(sizeof(block_q6_K) == sizeof(ggml_fp16_t) + 13*QK_K/16, "wrong q6_
 #define CUDA_SCALE_BLOCK_SIZE 256
 #define CUDA_ROPE_BLOCK_SIZE 256
 #define CUDA_DIAG_MASK_INF_BLOCK_SIZE 32
+#define CUDA_QUANTIZE_BLOCK_SIZE 256
 #define CUDA_DEQUANTIZE_BLOCK_SIZE 256

 // dmmv = dequantize_mul_mat_vec
 #ifndef GGML_CUDA_DMMV_X
 #define GGML_CUDA_DMMV_X 32
 #endif
-#ifndef GGML_CUDA_DMMV_Y
-#define GGML_CUDA_DMMV_Y 1
+#ifndef GGML_CUDA_MMV_Y
+#define GGML_CUDA_MMV_Y 1
 #endif

 #ifndef K_QUANTS_PER_ITERATION
@@ -270,7 +289,6 @@ static __global__ void rms_norm_f32(const float * x, float * dst, const int ncol
     }

     // sum up partial sums
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
@@ -714,7 +732,6 @@ static __global__ void dequantize_mul_mat_vec_q2_k(const void * vx, const float
 #endif

     // sum up partial sums and write back result
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
@@ -819,7 +836,6 @@ static __global__ void dequantize_mul_mat_vec_q3_k(const void * vx, const float
 #endif

     // sum up partial sums and write back result
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
@@ -923,7 +939,6 @@ static __global__ void dequantize_mul_mat_vec_q4_k(const void * vx, const float
 #endif

     // sum up partial sums and write back result
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
@@ -1028,7 +1043,6 @@ static __global__ void dequantize_mul_mat_vec_q5_k(const void * vx, const float
 #endif

     // sum up partial sums and write back result
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
@@ -1139,7 +1153,6 @@ static __global__ void dequantize_mul_mat_vec_q6_k(const void * vx, const float
 #endif

     // sum up partial sums and write back result
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
@@ -1158,6 +1171,41 @@ static __device__ void convert_f16(const void * vx, const int ib, const int iqs,
     v.y = x[ib + iqs + 1];
 }

+static __global__ void quantize_q8_1(const float * x, void * vy, const int k) {
+    const int i = blockDim.x*blockIdx.x + threadIdx.x;
+
+    if (i >= k) {
+        return;
+    }
+
+    block_q8_1 * y = (block_q8_1 *) vy;
+
+    const int ib = i / QK8_0; // block index
+    const int iqs = i % QK8_0; // quant index
+
+    const float xi = x[i];
+    float amax = fabsf(xi);
+    float sum = xi;
+
+#pragma unroll
+    for (int mask = 16; mask > 0; mask >>= 1) {
+        amax = fmaxf(amax, __shfl_xor_sync(0xffffffff, amax, mask, 32));
+        sum += __shfl_xor_sync(0xffffffff, sum, mask, 32);
+    }
+
+    const float d = amax / 127;
+    const int8_t q = amax == 0.0f ? 0 : roundf(xi / d);
+
+    y[ib].qs[iqs] = q;
+
+    if (iqs > 0) {
+        return;
+    }
+
+    y[ib].d = d;
+    y[ib].s = sum;
+}
+
 template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
 static __global__ void dequantize_block(const void * vx, float * y, const int k) {
     const int i = blockDim.x*blockIdx.x + 2*threadIdx.x;
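The per-block math in quantize_q8_1 reduces to: pick d = amax / 127 so the largest magnitude maps to +-127, quantize each value as round(x/d), and stash the unquantized block sum in s for later use by the q*_1 dot products. A scalar host-side sketch of one block (the warp shuffle reduction becomes a plain loop; names are illustrative, not from the diff):

#include <cmath>
#include <cstdint>

struct q8_1_block { float d, s; int8_t qs[32]; };

// Sketch of the same math as quantize_q8_1 for a single 32-value block.
static q8_1_block quantize_block_q8_1(const float x[32]) {
    float amax = 0.0f, sum = 0.0f;
    for (int i = 0; i < 32; ++i) {       // the kernel performs this reduction
        amax = fmaxf(amax, fabsf(x[i])); // across a warp with __shfl_xor_sync
        sum += x[i];
    }
    q8_1_block y;
    y.d = amax / 127.0f;
    y.s = sum; // folded back in by vec_dot_q4_1_q8_1 below as m*s
    for (int i = 0; i < 32; ++i) {
        y.qs[i] = amax == 0.0f ? 0 : (int8_t) roundf(x[i] / y.d);
    }
    return y;
}
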
@@ -1179,6 +1227,182 @@ static __global__ void dequantize_block(const void * vx, float * y, const int k)
     y[iybs + iqs + y_offset] = v.y;
 }

+static __device__ __forceinline__ float vec_dot_q4_0_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
+#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
+    const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq;
+
+    int vi;
+    memcpy(&vi, &bq4_0->qs[sizeof(int) * (iqs + 0)], sizeof(int));
+    const int ui0 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
+    const int ui1 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + QI4_0)]);
+
+    const float d = __half2float(bq4_0->d) * __half2float(bq8_1->d);
+
+    // subtract 8 from each quantized value
+    const int vi0 = __vsub4((vi >> 0) & 0x0F0F0F0F, 0x08080808);
+    const int vi1 = __vsub4((vi >> 4) & 0x0F0F0F0F, 0x08080808);
+
+    // SIMD dot product of quantized values
+    int sumi = __dp4a(vi0, ui0, 0);
+    sumi = __dp4a(vi1, ui1, sumi);
+
+    return sumi*d;
+#else
+    return 0.0f; // only to satisfy the compiler
+#endif // __CUDA_ARCH__ >= 600
+}
+
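The __vsub4/__dp4a pair above is just a packed form of sum((q4 - 8) * q8) over four byte lanes at a time. A scalar C++ equivalent of one 32-bit slice, handy for checking the kernel on the host (a sketch under the same block layout, not part of the diff):

#include <cstdint>

// Scalar sketch of vec_dot_q4_0_q8_1 for one 4-byte slice: low nibbles pair
// with q8 values u0, high nibbles with u1 (QI4_0 ints further into qs).
static float dot_q4_0_q8_1_slice(const uint8_t qs[4], const int8_t u0[4],
                                 const int8_t u1[4], float d4, float d8) {
    int sumi = 0;
    for (int j = 0; j < 4; ++j) {
        sumi += ((qs[j] & 0x0F) - 8) * u0[j]; // __vsub4 then first __dp4a
        sumi += ((qs[j] >> 4)   - 8) * u1[j]; // second __dp4a accumulates
    }
    return sumi * d4 * d8; // d4*d8 matches __half2float(d4) * __half2float(d8)
}

The q4_1 variant that follows has no offset subtraction; instead it adds m*s / QI4_1, folding the block minimum back in via the stored q8_1 sum, divided by QI4_1 because that many threads each contribute the correction once.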
static __device__ __forceinline__ float vec_dot_q4_1_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
|
||||||
|
#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
|
||||||
|
const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq;
|
||||||
|
|
||||||
|
const int vi = *((int *) &bq4_1->qs[sizeof(int) * (iqs + 0)]);
|
||||||
|
const int ui0 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
|
||||||
|
const int ui1 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + QI4_1)]);
|
||||||
|
|
||||||
|
const float d = __half2float(bq4_1->d) * __half2float(bq8_1->d);
|
||||||
|
const float m = bq4_1->m;
|
||||||
|
const float s = bq8_1->s;
|
||||||
|
|
||||||
|
const int vi0 = (vi >> 0) & 0x0F0F0F0F;
|
||||||
|
const int vi1 = (vi >> 4) & 0x0F0F0F0F;
|
||||||
|
|
||||||
|
// SIMD dot product of quantized values
|
||||||
|
int sumi = __dp4a(vi0, ui0, 0);
|
||||||
|
sumi = __dp4a(vi1, ui1, sumi);
|
||||||
|
|
||||||
|
return sumi*d + m*s / QI4_1; // scale sum by QI4_1 because there are QI4_1 threads working on this block
|
||||||
|
#else
|
||||||
|
return 0.0f; // only to satisfy the compiler
|
||||||
|
#endif // __CUDA_ARCH__ >= 600
|
||||||
|
}
|
||||||
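Why `m*s / QI4_1`: dequantizing q4_1 gives x_j = d4*q_j + m, so the block dot product factors as sum_j x_j*y_j = d4 * sum_j q_j*y_j + m * sum_j y_j. The second term only needs the block sum of the activations, which q8_1 precomputed as s; and since QI4_1 lanes each return a partial result for this block that the warp reduction later adds up, each lane contributes m*s/QI4_1 so the reduced total is exactly m*s. A toy numeric check of the factorization:

    // Numeric sanity check of the m*s correction term (toy values).
    #include <cstdio>

    int main() {
        const float d4 = 0.5f, m = 1.25f;      // q4_1 scale and offset
        const int   q[4] = {3, 0, 7, 12};      // toy quants
        const float y[4] = {0.1f, -2.0f, 0.5f, 1.0f};
        float exact = 0.0f, sumqy = 0.0f, s = 0.0f;
        for (int j = 0; j < 4; ++j) {
            exact += (d4*q[j] + m) * y[j];     // dequantize-then-dot
            sumqy += q[j] * y[j];
            s     += y[j];
        }
        printf("%f == %f\n", exact, d4*sumqy + m*s); // the two forms agree
        return 0;
    }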
+
+static __device__ __forceinline__ float vec_dot_q5_0_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
+#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
+    const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq;
+
+    int qs;
+    memcpy(&qs, &bq5_0->qs[sizeof(int) * (iqs + 0)], sizeof(int));
+    const int qh0 = bq5_0->qh[iqs/2 + 0] >> 4*(iqs%2);
+    const int qh1 = bq5_0->qh[iqs/2 + 2] >> 4*(iqs%2);
+    const int ui0 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
+    const int ui1 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + QI5_0)]);
+
+    const float d = __half2float(bq5_0->d) * __half2float(bq8_1->d);
+
+    int vi0 = (qs >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh0 as 5th bits
+    vi0 |= (qh0 <<  4) & 0x00000010; // 1 ->  5
+    vi0 |= (qh0 << 11) & 0x00001000; // 2 -> 13
+    vi0 |= (qh0 << 18) & 0x00100000; // 3 -> 21
+    vi0 |= (qh0 << 25) & 0x10000000; // 4 -> 29
+    vi0 = __vsub4(vi0, 0x10101010); // subtract 16 from quantized values
+    int sumi = __dp4a(vi0, ui0, 0); // SIMD dot product of quantized values
+
+    int vi1 = (qs >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh1 as 5th bits
+    vi1 |= (qh1 <<  4) & 0x00000010; // 1 ->  5
+    vi1 |= (qh1 << 11) & 0x00001000; // 2 -> 13
+    vi1 |= (qh1 << 18) & 0x00100000; // 3 -> 21
+    vi1 |= (qh1 << 25) & 0x10000000; // 4 -> 29
+    vi1 = __vsub4(vi1, 0x10101010); // subtract 16 from quantized values
+    sumi = __dp4a(vi1, ui1, sumi); // SIMD dot product of quantized values
+
+    return sumi*d;
+#else
+    return 0.0f; // only to satisfy the compiler
+#endif // __CUDA_ARCH__ >= 600
+}
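The shift/mask ladder scatters the 5th bits into place: after masking, byte b of vi0 holds the low 4 bits of one quant, and its 5th bit must land at bit 8*b+4 of the word, i.e. at bits 4, 12, 20 and 28 (the comments count bits 1-indexed, hence "1 -> 5" and so on). After the `>> 4*(iqs%2)` alignment, the four relevant high bits sit at bits 0..3 of qh0/qh1, so each `(qh << k) & mask` moves exactly one bit into its byte. A scalar sketch of the same scatter (assuming the four high bits sit at bits 0..3 of qh):

    // Scalar sketch of the 5th-bit scatter used above: move bits 0..3 of qh
    // to bit 4 of each byte of a packed 4x8-bit word.
    #include <cstdint>
    #include <cassert>

    static uint32_t scatter_5th_bits(uint32_t lo_nibbles, uint32_t qh) {
        uint32_t v = lo_nibbles & 0x0F0F0F0F;
        for (int b = 0; b < 4; ++b) {
            v |= ((qh >> b) & 1u) << (8*b + 4); // qh bit b -> bit 4 of byte b
        }
        return v;
    }

    int main() {
        // equivalent to the ladder: (qh<<4)&0x10, (qh<<11)&0x1000, ...
        assert(scatter_5th_bits(0x01020304, 0xF) == 0x11121314);
        return 0;
    }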
+
+static __device__ __forceinline__ float vec_dot_q5_1_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
+#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
+    const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq;
+
+    const int qs = *((int *) &bq5_1->qs[sizeof(int) * (iqs + 0)]);
+    const int qh0 = bq5_1->qh[iqs/2 + 0] >> 4*(iqs%2);
+    const int qh1 = bq5_1->qh[iqs/2 + 2] >> 4*(iqs%2);
+    const int ui0 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
+    const int ui1 = *((int *) &bq8_1->qs[sizeof(int) * (iqs + QI5_1)]);
+
+    const float d = __half2float(bq5_1->d) * __half2float(bq8_1->d);
+    const float m = bq5_1->m;
+    const float s = bq8_1->s;
+
+    int vi0 = (qs >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh0 as 5th bits
+    vi0 |= (qh0 <<  4) & 0x00000010; // 1 ->  5
+    vi0 |= (qh0 << 11) & 0x00001000; // 2 -> 13
+    vi0 |= (qh0 << 18) & 0x00100000; // 3 -> 21
+    vi0 |= (qh0 << 25) & 0x10000000; // 4 -> 29
+    int sumi = __dp4a(vi0, ui0, 0); // SIMD dot product of quantized values
+
+    int vi1 = (qs >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh1 as 5th bits
+    vi1 |= (qh1 <<  4) & 0x00000010; // 1 ->  5
+    vi1 |= (qh1 << 11) & 0x00001000; // 2 -> 13
+    vi1 |= (qh1 << 18) & 0x00100000; // 3 -> 21
+    vi1 |= (qh1 << 25) & 0x10000000; // 4 -> 29
+    sumi = __dp4a(vi1, ui1, sumi); // SIMD dot product of quantized values
+
+    return sumi*d + m*s / QI5_1; // scale sum by QI5_1 because there are QI5_1 threads working on this block
+#else
+    return 0.0f; // only to satisfy the compiler
+#endif // __CUDA_ARCH__ >= 600
+}
+
+static __device__ __forceinline__ float vec_dot_q8_0_q8_1(const void * vbq, const block_q8_1 * bq8_1, const int iqs) {
+#if __CUDA_ARCH__ >= 600 // lowest compute capability for integer intrinsics
+    const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq;
+
+    int vi;
+    memcpy(&vi, &bq8_0->qs[sizeof(int) * (iqs + 0)], sizeof(int));
+    const int ui = *((int *) &bq8_1->qs[sizeof(int) * (iqs + 0)]);
+
+    const float d = __half2float(bq8_0->d) * __half2float(bq8_1->d);
+
+    // SIMD dot product of quantized values
+    int sumi = __dp4a(vi, ui, 0);
+
+    return sumi*d;
+#else
+    return 0.0f; // only to satisfy the compiler
+#endif // __CUDA_ARCH__ >= 600
+}
+
+template <int qk, int qi, typename block_q_t, vec_dot_q_cuda_t vec_dot_q_cuda>
+static __global__ void mul_mat_vec_q(const void * vx, const void * vy, float * dst, const int ncols, const int nrows) {
+    const int row = blockIdx.y*blockDim.y + threadIdx.y;
+
+    if (row >= nrows) {
+        return;
+    }
+
+    const int blocks_per_row = ncols / qk;
+    const int blocks_per_warp = WARP_SIZE / qi;
+
+    // partial sum for each thread
+    float tmp = 0.0f;
+
+    const block_q_t  * x = (const block_q_t  *) vx;
+    const block_q8_1 * y = (const block_q8_1 *) vy;
+
+    for (int i = 0; i < blocks_per_row; i += blocks_per_warp) {
+        const int ibx = row*blocks_per_row + i + threadIdx.x / qi; // x block index
+
+        const int iby = i + threadIdx.x / qi; // y block index
+
+        const int iqs = threadIdx.x % qi; // x block quant index when casting the quants to int
+
+        tmp += vec_dot_q_cuda(&x[ibx], &y[iby], iqs);
+    }
+
+    // sum up partial sums and write back result
+#pragma unroll
+    for (int mask = 16; mask > 0; mask >>= 1) {
+        tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
+    }
+
+    if (threadIdx.x == 0) {
+        dst[row] = tmp;
+    }
+}
+
 template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
 static __global__ void dequantize_mul_mat_vec(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows) {
     // qk = quantized weights per x block
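In `mul_mat_vec_q` each warp owns one output row: `threadIdx.y` selects the row, the 32 lanes split every quant block into `qi` int-sized slices, and one loop iteration covers `WARP_SIZE/qi` blocks before the shuffle reduction combines the lane partials. A sketch of the index arithmetic for q4_0 (assuming qk = 32 quants per block, qi = 4 packed 32-bit words per block, and WARP_SIZE = 32):

    // Sketch of the work split in mul_mat_vec_q for q4_0 (assumed constants).
    #include <cstdio>

    int main() {
        const int qk = 32, qi = 4, warp = 32;
        const int ncols = 4096;
        const int blocks_per_row  = ncols / qk;  // 128 quant blocks in the row
        const int blocks_per_warp = warp / qi;   // 8 blocks handled per iteration
        for (int tid = 0; tid < warp; tid += 7) {
            // lane tid works on block i + tid/qi, int-slice tid%qi, per iteration
            printf("lane %2d -> block offset %d, iqs %d\n", tid, tid / qi, tid % qi);
        }
        printf("iterations per row: %d\n", blocks_per_row / blocks_per_warp); // 16
        return 0;
    }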
@@ -1233,7 +1457,6 @@ static __global__ void dequantize_mul_mat_vec(const void * vx, const dfloat * y,
     }
 
     // sum up partial sums and write back result
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
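Dropping the `__syncthreads()` before these reductions is safe because the reduction happens entirely within a single warp via `__shfl_xor_sync`: the 0xffffffff mask already makes the participating lanes converge, and no shared memory is read afterwards, so a block-wide barrier adds cost without adding correctness. A minimal standalone kernel showing the same butterfly pattern:

    // Minimal butterfly warp reduction, the pattern the kernels above use.
    __global__ void warp_sum(const float * x, float * out) {
        float tmp = x[threadIdx.x]; // one warp: threadIdx.x in [0, 32)
    #pragma unroll
        for (int mask = 16; mask > 0; mask >>= 1) {
            tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
        }
        if (threadIdx.x == 0) {
            *out = tmp; // every lane holds the full sum; lane 0 writes it
        }
    }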
@@ -1284,7 +1507,6 @@ static __global__ void mul_mat_p021_f16_f32(const void * vx, const float * y, fl
     const int idst = channel*nrows_dst + row_dst;
 
     // sum up partial sums and write back result
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
@@ -1330,7 +1552,6 @@ static __global__ void mul_mat_vec_nc_f16_f32( // nc == non-contiguous
     }
 
     // sum up partial sums and write back result
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
@@ -1440,7 +1661,6 @@ static __global__ void soft_max_f32(const float * x, float * dst, const int ncol
     }
 
     // sum up partial sums
-    __syncthreads();
 #pragma unroll
     for (int mask = 16; mask > 0; mask >>= 1) {
         tmp += __shfl_xor_sync(0xffffffff, tmp, mask, 32);
@@ -1494,6 +1714,11 @@ static void rms_norm_f32_cuda(const float * x, float * dst, const int ncols, con
     rms_norm_f32<<<nrows, block_dims, 0, stream>>>(x, dst, ncols);
 }
 
+static void quantize_row_q8_1_cuda(const float * x, void * vy, const int k, cudaStream_t stream) {
+    const int num_blocks = (k + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE;
+    quantize_q8_1<<<num_blocks, CUDA_QUANTIZE_BLOCK_SIZE, 0, stream>>>(x, vy, k);
+}
+
 static void dequantize_row_q4_0_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
     dequantize_block<QK4_0, QR4_0, dequantize_q4_0><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
@@ -1562,45 +1787,45 @@ static void dequantize_row_q6_K_cuda(const void * vx, float * y, const int k, cu
 
 static void dequantize_mul_mat_vec_q4_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
     GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
-    const int block_num_y = (nrows + GGML_CUDA_DMMV_Y - 1) / GGML_CUDA_DMMV_Y;
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
     const dim3 block_nums(1, block_num_y, 1);
-    const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
     dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>
         <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }
 
 static void dequantize_mul_mat_vec_q4_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
     GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
-    const int block_num_y = (nrows + GGML_CUDA_DMMV_Y - 1) / GGML_CUDA_DMMV_Y;
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
     const dim3 block_nums(1, block_num_y, 1);
-    const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
     dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>
         <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }
 
 static void dequantize_mul_mat_vec_q5_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
     GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
-    const int block_num_y = (nrows + GGML_CUDA_DMMV_Y - 1) / GGML_CUDA_DMMV_Y;
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
     const dim3 block_nums(1, block_num_y, 1);
-    const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
     dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>
         <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }
 
 static void dequantize_mul_mat_vec_q5_1_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
     GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
-    const int block_num_y = (nrows + GGML_CUDA_DMMV_Y - 1) / GGML_CUDA_DMMV_Y;
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
     const dim3 block_nums(1, block_num_y, 1);
-    const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
     dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>
         <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }
 
 static void dequantize_mul_mat_vec_q8_0_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
     GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
-    const int block_num_y = (nrows + GGML_CUDA_DMMV_Y - 1) / GGML_CUDA_DMMV_Y;
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
     const dim3 block_nums(1, block_num_y, 1);
-    const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
     dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>
         <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }
@@ -1647,6 +1872,51 @@ static void dequantize_mul_mat_vec_q6_K_cuda(const void * vx, const float * y, f
     dequantize_mul_mat_vec_q6_k<<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }
 
+static void mul_mat_vec_q4_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK4_0, QI4_0, block_q4_0, vec_dot_q4_0_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
+static void mul_mat_vec_q4_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK4_1, QI4_1, block_q4_1, vec_dot_q4_1_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
+static void mul_mat_vec_q5_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK5_0, QI5_0, block_q5_0, vec_dot_q5_0_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
+static void mul_mat_vec_q5_1_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK5_1, QI5_1, block_q5_1, vec_dot_q5_1_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
+static void mul_mat_vec_q8_0_q8_1_cuda(const void * vx, const void * vy, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
+    GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
+    const dim3 block_nums(1, block_num_y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
+    mul_mat_vec_q<QK8_0, QI8_0, block_q8_0, vec_dot_q8_0_q8_1>
+        <<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ncols, nrows);
+}
+
 static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, cudaStream_t stream) {
     const int num_blocks = (k + CUDA_DEQUANTIZE_BLOCK_SIZE - 1) / CUDA_DEQUANTIZE_BLOCK_SIZE;
     dequantize_block<1, 1, convert_f16><<<num_blocks, CUDA_DEQUANTIZE_BLOCK_SIZE, 0, stream>>>(vx, y, k);
@@ -1654,9 +1924,9 @@ static void convert_fp16_to_fp32_cuda(const void * vx, float * y, const int k, c
 
 static void convert_mul_mat_vec_f16_cuda(const void * vx, const dfloat * y, float * dst, const int ncols, const int nrows, cudaStream_t stream) {
     GGML_ASSERT(ncols % GGML_CUDA_DMMV_X == 0);
-    const int block_num_y = (nrows + GGML_CUDA_DMMV_Y - 1) / GGML_CUDA_DMMV_Y;
+    const int block_num_y = (nrows + GGML_CUDA_MMV_Y - 1) / GGML_CUDA_MMV_Y;
     const dim3 block_nums(1, block_num_y, 1);
-    const dim3 block_dims(WARP_SIZE, GGML_CUDA_DMMV_Y, 1);
+    const dim3 block_dims(WARP_SIZE, GGML_CUDA_MMV_Y, 1);
     dequantize_mul_mat_vec<1, 1, convert_f16>
         <<<block_nums, block_dims, 0, stream>>>(vx, y, dst, ncols, nrows);
 }
@@ -1847,6 +2117,7 @@ static size_t g_scratch_offset = 0;
 
 static int g_device_count = -1;
 static int g_main_device = 0;
+static int g_compute_capabilities[GGML_CUDA_MAX_DEVICES];
 static float g_tensor_split[GGML_CUDA_MAX_DEVICES] = {0};
 
 static cublasHandle_t g_cublas_handles[GGML_CUDA_MAX_DEVICES] = {nullptr};
@@ -1864,9 +2135,12 @@ void ggml_init_cublas() {
     for (int id = 0; id < g_device_count; ++id) {
         cudaDeviceProp prop;
         CUDA_CHECK(cudaGetDeviceProperties(&prop, id));
-        fprintf(stderr, "  Device %d: %s\n", id, prop.name);
+        fprintf(stderr, "  Device %d: %s, compute capability %d.%d\n", id, prop.name, prop.major, prop.minor);
 
         g_tensor_split[id] = total_vram;
         total_vram += prop.totalGlobalMem;
+
+        g_compute_capabilities[id] = 100*prop.major + 10*prop.minor;
     }
     for (int id = 0; id < g_device_count; ++id) {
         g_tensor_split[id] /= total_vram;
@@ -2082,7 +2356,7 @@ inline void ggml_cuda_op_rms_norm(
     (void) i1;
 }
 
-inline void ggml_cuda_op_dequantize_mul_mat_vec(
+inline void ggml_cuda_op_mul_mat_vec(
     const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, char * src0_ddq_i,
     float * src0_ddf_i, float * src1_ddf_i, float * dst_ddf_i, int64_t i02, int64_t i01_low, int64_t i01_high, int i1,
     cudaStream_t & cudaStream_main){
@@ -2094,6 +2368,52 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec(
     const int64_t ne00 = src0->ne[0];
     const int64_t nrows = i01_high - i01_low;
 
+#ifdef GGML_CUDA_FORCE_DMMV
+    const bool use_mul_mat_vec_q = false;
+#else
+    int id;
+    CUDA_CHECK(cudaGetDevice(&id));
+
+    const bool mul_mat_vec_q_implemented = src0->type == GGML_TYPE_Q4_0 ||
+        src0->type == GGML_TYPE_Q4_1 ||
+        src0->type == GGML_TYPE_Q5_0 ||
+        src0->type == GGML_TYPE_Q5_1 ||
+        src0->type == GGML_TYPE_Q8_0;
+
+    // The integer intrinsics used in mul_mat_vec_q are available with compute capability 6.
+    // However, they have bad performance with Pascal cards.
+    // Therefore, in a multi GPU setting decide at runtime which GPUs should use mul_mat_vec_q.
+    const bool use_mul_mat_vec_q = g_compute_capabilities[id] >= 700 && mul_mat_vec_q_implemented;
+#endif
+
+    if (use_mul_mat_vec_q) {
+        size_t as;
+        void * src1_q8_1 = ggml_cuda_pool_malloc(ne00*sizeof(block_q8_1)/QK8_1, &as);
+        quantize_row_q8_1_cuda(src1_ddf_i, src1_q8_1, ne00, cudaStream_main);
+
+        switch (src0->type) {
+            case GGML_TYPE_Q4_0:
+                mul_mat_vec_q4_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q4_1:
+                mul_mat_vec_q4_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q5_0:
+                mul_mat_vec_q5_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q5_1:
+                mul_mat_vec_q5_1_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            case GGML_TYPE_Q8_0:
+                mul_mat_vec_q8_0_q8_1_cuda(src0_ddq_i, src1_q8_1, dst_ddf_i, ne00, nrows, cudaStream_main);
+                break;
+            default:
+                GGML_ASSERT(false);
+                break;
+        }
+
+        ggml_cuda_pool_free(src1_q8_1, as);
+    } else {
     // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
 #ifdef GGML_CUDA_DMMV_F16
     size_t ash;
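The new path quantizes the f32 activation row to q8_1 on the fly and runs the integer-intrinsic kernel, instead of dequantizing the weights to floats as the dmmv path does. Compute capability 7.0 (Volta) is used as the runtime cutoff because the intrinsics exist from 6.x but perform poorly on Pascal, and defining GGML_CUDA_FORCE_DMMV forces the old path everywhere. A sketch of the same decision in isolation (cc encoded as 100*major + 10*minor, as stored in g_compute_capabilities):

    // Sketch of the runtime kernel choice above.
    #include <cstdio>

    static bool use_mmvq(int cc, bool type_supported, bool force_dmmv) {
        if (force_dmmv) return false;       // GGML_CUDA_FORCE_DMMV behaviour
        return cc >= 700 && type_supported; // Volta and newer, supported type
    }

    int main() {
        printf("%d\n", use_mmvq(610, true, false)); // Pascal (6.1): 0, dmmv
        printf("%d\n", use_mmvq(700, true, false)); // Volta  (7.0): 1, mmvq
        printf("%d\n", use_mmvq(860, true, true));  // forced dmmv:  0
        return 0;
    }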
@@ -2157,6 +2477,7 @@ inline void ggml_cuda_op_dequantize_mul_mat_vec(
         ggml_cuda_pool_free(src1_dfloat, ash);
     }
 #endif // GGML_CUDA_DMMV_F16
+    }
 
     (void) src1;
     (void) dst;
@@ -2729,8 +3050,8 @@ void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1, ggml_
     }else if (src0->type == GGML_TYPE_F32) {
         ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
     } else if (ggml_is_quantized(src0->type) || src0->type == GGML_TYPE_F16) {
-        if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0 && src0->ne[1] % GGML_CUDA_DMMV_Y == 0) {
-            ggml_cuda_op(src0, src1, dst, ggml_cuda_op_dequantize_mul_mat_vec, false, false);
+        if (src1->ne[1] == 1 && src0->ne[0] % GGML_CUDA_DMMV_X == 0) {
+            ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_vec, false, false);
         } else {
             ggml_cuda_op(src0, src1, dst, ggml_cuda_op_mul_mat_cublas, true, false);
         }
@@ -2863,7 +3184,7 @@ void ggml_cuda_transform_tensor(void * data, struct ggml_tensor * tensor) {
 }
 
 void ggml_cuda_free_data(struct ggml_tensor * tensor) {
-    if (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) {
+    if (!tensor || (tensor->backend != GGML_BACKEND_GPU && tensor->backend != GGML_BACKEND_GPU_SPLIT) ) {
         return;
     }
@@ -654,13 +654,17 @@ __kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx,
     const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
     const int in = tid - step*im; // 0...15 or 0...7
 
-#if K_QUANTS_PER_ITERATION == 1
+\n#if K_QUANTS_PER_ITERATION == 1\n
     const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15
     const int is = 0;
-#else
+\n#else\n
     const int l0 = 4 * in; // 0, 4, 8, ..., 28
     const int is = in / 4;
-#endif
+\n#endif\n
 
     const int ql_offset = 64*im + l0;
     const int qh_offset = 32*im + l0;
     const int s_offset = 8*im + is;
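The literal \n additions look odd but matter: these OpenCL kernels live inside a host-side string that is stitched together from source fragments, and #if/#else/#endif are only valid when they begin on their own line of the final program source, so explicit newlines are baked into the string around every directive. A sketch of the failure mode (hypothetical fragments, not the file's actual macros):

    // Why the \n matters: joining fragments without newlines glues a
    // directive onto the previous token, which the OpenCL compiler rejects.
    #include <string>
    #include <iostream>

    int main() {
        const std::string frag = "const int is = 0;";
        const std::string bad  = frag + "#endif";     // "const int is = 0;#endif"
        const std::string good = frag + "\n#endif\n"; // directive on its own line
        std::cout << bad << "\n---\n" << good << "\n";
        return 0;
    }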
@@ -677,7 +681,7 @@ __kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx,
 
     const float d = vload_half(0, &x[i].d);
 
-#if K_QUANTS_PER_ITERATION == 1
+\n#if K_QUANTS_PER_ITERATION == 1\n
     float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
               + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
               + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
@@ -687,7 +691,7 @@ __kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx,
               + y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
               +y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32);
     tmp[16 * ix + tid] += sum;
-#else
+\n#else\n
     float sum = 0;
     for (int l = 0; l < 4; ++l) {
         sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
@@ -696,7 +700,7 @@ __kernel void dequantize_mul_mat_vec_q6_K(__global const struct block_q6_K * xx,
              + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32);
     }
     tmp[16 * ix + tid] += sum;
-#endif
+\n#endif\n
 
 }
@@ -1384,7 +1388,7 @@ static void ggml_cl_mul_f32(const ggml_tensor * src0, const ggml_tensor * src1,
     const int64_t ne00 = src0->ne[0];
     const int64_t ne01 = src0->ne[1];
     const int64_t ne02 = src0->ne[2];
-    const int64_t ne03 = src0->ne[2];
+    const int64_t ne03 = src0->ne[3];
     const int64_t ne0 = ne00 * ne01 * ne02 * ne03;
     const int64_t ne10 = src1->ne[0];
    const int64_t ne11 = src1->ne[1];
ggml.h
@@ -201,6 +201,8 @@
 #define GGML_MAX_NAME 48
 #define GGML_DEFAULT_N_THREADS 4
 
+#define GGML_UNUSED(x) (void)(x)
+
 #define GGML_ASSERT(x) \
     do { \
         if (!(x)) { \
@@ -209,6 +211,30 @@
         } \
     } while (0)
 
+// used to copy the number of elements and stride in bytes of tensors into local variables.
+// main purpose is to reduce code duplication and improve readability.
+//
+// example:
+//
+//    GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
+//    GGML_TENSOR_LOCALS(size_t, nb1, src1, nb);
+//
+#define GGML_TENSOR_LOCALS_1(type, prefix, pointer, array) \
+    const type prefix##0 = (pointer)->array[0]; \
+    GGML_UNUSED(prefix##0);
+#define GGML_TENSOR_LOCALS_2(type, prefix, pointer, array) \
+    GGML_TENSOR_LOCALS_1 (type, prefix, pointer, array) \
+    const type prefix##1 = (pointer)->array[1]; \
+    GGML_UNUSED(prefix##1);
+#define GGML_TENSOR_LOCALS_3(type, prefix, pointer, array) \
+    GGML_TENSOR_LOCALS_2 (type, prefix, pointer, array) \
+    const type prefix##2 = (pointer)->array[2]; \
+    GGML_UNUSED(prefix##2);
+#define GGML_TENSOR_LOCALS(type, prefix, pointer, array) \
+    GGML_TENSOR_LOCALS_3 (type, prefix, pointer, array) \
+    const type prefix##3 = (pointer)->array[3]; \
+    GGML_UNUSED(prefix##3);
+
 #ifdef __cplusplus
 extern "C" {
 #endif
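GGML_TENSOR_LOCALS is pure token pasting: each level declares one more const local named prefix plus the dimension index, and immediately marks it used so -Wunused-variable stays quiet in functions that only touch some dimensions. Expanded by hand for a src1 tensor (what the example in the comment produces; the struct here is a stand-in for ggml_tensor):

    // Hand expansion of GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne):
    #include <cstdint>
    struct tensor_view { int64_t ne[4]; };

    static void show_expansion(const tensor_view * src1) {
        const int64_t ne10 = (src1)->ne[0]; (void)(ne10);
        const int64_t ne11 = (src1)->ne[1]; (void)(ne11);
        const int64_t ne12 = (src1)->ne[2]; (void)(ne12);
        const int64_t ne13 = (src1)->ne[3]; (void)(ne13);
    }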
@@ -224,8 +250,8 @@ extern "C" {
     GGML_API float       ggml_fp16_to_fp32(ggml_fp16_t x);
     GGML_API ggml_fp16_t ggml_fp32_to_fp16(float x);
 
-    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, size_t n);
-    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, size_t n);
+    GGML_API void ggml_fp16_to_fp32_row(const ggml_fp16_t * x, float * y, int n);
+    GGML_API void ggml_fp32_to_fp16_row(const float * x, ggml_fp16_t * y, int n);
 
     struct ggml_object;
     struct ggml_context;
@@ -295,12 +321,15 @@ extern "C" {
         GGML_OP_SUM,
         GGML_OP_SUM_ROWS,
         GGML_OP_MEAN,
+        GGML_OP_ARGMAX,
         GGML_OP_REPEAT,
         GGML_OP_REPEAT_BACK,
         GGML_OP_ABS,
         GGML_OP_SGN,
         GGML_OP_NEG,
         GGML_OP_STEP,
+        GGML_OP_TANH,
+        GGML_OP_ELU,
         GGML_OP_RELU,
         GGML_OP_GELU,
         GGML_OP_GELU_QUICK,
@@ -332,9 +361,8 @@ extern "C" {
         GGML_OP_ROPE_BACK,
         GGML_OP_ALIBI,
         GGML_OP_CLAMP,
-        GGML_OP_CONV_1D_S1_PH,
-        GGML_OP_CONV_1D_S2_PH,
-        GGML_OP_CONV_2D_SK_P0,
+        GGML_OP_CONV_1D,
+        GGML_OP_CONV_2D,
 
         GGML_OP_FLASH_ATTN,
         GGML_OP_FLASH_FF,
@@ -692,6 +720,11 @@ extern "C" {
             struct ggml_context * ctx,
             struct ggml_tensor  * a);
 
+    // argmax along rows
+    GGML_API struct ggml_tensor * ggml_argmax(
+            struct ggml_context * ctx,
+            struct ggml_tensor  * a);
+
     // if a is the same shape as b, and a is not parameter, return a
     // otherwise, return a new tensor: repeat(a) to fit in b
     GGML_API struct ggml_tensor * ggml_repeat(
|
@ -736,6 +769,22 @@ extern "C" {
|
||||||
struct ggml_context * ctx,
|
struct ggml_context * ctx,
|
||||||
struct ggml_tensor * a);
|
struct ggml_tensor * a);
|
||||||
|
|
||||||
|
GGML_API struct ggml_tensor * ggml_tanh(
|
||||||
|
struct ggml_context * ctx,
|
||||||
|
struct ggml_tensor * a);
|
||||||
|
|
||||||
|
GGML_API struct ggml_tensor * ggml_tanh_inplace(
|
||||||
|
struct ggml_context * ctx,
|
||||||
|
struct ggml_tensor * a);
|
||||||
|
|
||||||
|
GGML_API struct ggml_tensor * ggml_elu(
|
||||||
|
struct ggml_context * ctx,
|
||||||
|
struct ggml_tensor * a);
|
||||||
|
|
||||||
|
GGML_API struct ggml_tensor * ggml_elu_inplace(
|
||||||
|
struct ggml_context * ctx,
|
||||||
|
struct ggml_tensor * a);
|
||||||
|
|
||||||
GGML_API struct ggml_tensor * ggml_relu(
|
GGML_API struct ggml_tensor * ggml_relu(
|
||||||
struct ggml_context * ctx,
|
struct ggml_context * ctx,
|
||||||
struct ggml_tensor * a);
|
struct ggml_tensor * a);
|
||||||
|
@ -1086,58 +1135,33 @@ extern "C" {
|
||||||
float min,
|
float min,
|
||||||
float max);
|
float max);
|
||||||
|
|
||||||
// TODO: implement general-purpose convolutions
|
GGML_API struct ggml_tensor * ggml_conv_1d(
|
||||||
// GGML_API struct ggml_tensor * ggml_conv_1d(
|
|
||||||
// struct ggml_context * ctx,
|
|
||||||
// struct ggml_tensor * a,
|
|
||||||
// struct ggml_tensor * b,
|
|
||||||
// int s0
|
|
||||||
// int p0,
|
|
||||||
// int d0);
|
|
||||||
//
|
|
||||||
// GGML_API struct ggml_tensor * ggml_conv_2d(
|
|
||||||
// struct ggml_context * ctx,
|
|
||||||
// struct ggml_tensor * a,
|
|
||||||
// struct ggml_tensor * b,
|
|
||||||
// int s0,
|
|
||||||
// int s1,
|
|
||||||
// int p0,
|
|
||||||
// int p1,
|
|
||||||
// int d0,
|
|
||||||
// int d1);
|
|
||||||
|
|
||||||
// padding = half
|
|
||||||
// TODO: we don't support extra parameters for now
|
|
||||||
// that's why we are hard-coding the stride, padding, and dilation
|
|
||||||
// not great ..
|
|
||||||
// example:
|
|
||||||
// a: 3 80 768 1
|
|
||||||
// b: 3000 80 1 1
|
|
||||||
// res: 3000 768 1 1
|
|
||||||
// used in whisper
|
|
||||||
GGML_API struct ggml_tensor * ggml_conv_1d_s1_ph(
|
|
||||||
struct ggml_context * ctx,
|
struct ggml_context * ctx,
|
||||||
struct ggml_tensor * a,
|
struct ggml_tensor * a,
|
||||||
struct ggml_tensor * b);
|
struct ggml_tensor * b,
|
||||||
|
int s0, // stride
|
||||||
|
int p0, // padding
|
||||||
|
int d0); // dilation
|
||||||
|
|
||||||
// used in whisper
|
GGML_API struct ggml_tensor * ggml_conv_2d(
|
||||||
GGML_API struct ggml_tensor * ggml_conv_1d_s2_ph(
|
|
||||||
struct ggml_context * ctx,
|
struct ggml_context * ctx,
|
||||||
struct ggml_tensor * a,
|
struct ggml_tensor * a,
|
||||||
struct ggml_tensor * b);
|
struct ggml_tensor * b,
|
||||||
|
int s0,
|
||||||
|
int s1,
|
||||||
|
int p0,
|
||||||
|
int p1,
|
||||||
|
int d0,
|
||||||
|
int d1);
|
||||||
|
|
||||||
// kernel size is a->ne[0] x a->ne[1]
|
// conv_1d with padding = half
|
||||||
// stride is equal to kernel size
|
// alias for ggml_conv_1d(a, b, s, a->ne[0]/2, d)
|
||||||
// padding is zero
|
GGML_API struct ggml_tensor* ggml_conv_1d_ph(
|
||||||
// example:
|
|
||||||
// a: 16 16 3 768
|
|
||||||
// b: 1024 1024 3 1
|
|
||||||
// res: 64 64 768 1
|
|
||||||
// used in sam
|
|
||||||
GGML_API struct ggml_tensor * ggml_conv_2d_sk_p0(
|
|
||||||
struct ggml_context * ctx,
|
struct ggml_context * ctx,
|
||||||
struct ggml_tensor * a,
|
struct ggml_tensor * a,
|
||||||
struct ggml_tensor * b);
|
struct ggml_tensor * b,
|
||||||
|
int s,
|
||||||
|
int d);
|
||||||
|
|
||||||
GGML_API struct ggml_tensor * ggml_flash_attn(
|
GGML_API struct ggml_tensor * ggml_flash_attn(
|
||||||
struct ggml_context * ctx,
|
struct ggml_context * ctx,
|
||||||
|
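With explicit stride, padding and dilation, the 1-d output length follows the usual formula L_out = (L_in + 2*p0 - d0*(k - 1) - 1) / s0 + 1 with kernel length k = a->ne[0]; ggml_conv_1d_ph picks p0 = k/2 so that, at stride 1 and odd k, L_out equals L_in. A quick check of the arithmetic (plain integer math, not calling ggml):

    // Output length of a 1-d convolution under the new explicit parameters.
    #include <cstdio>

    static int conv_1d_out_len(int L, int k, int s0, int p0, int d0) {
        return (L + 2*p0 - d0*(k - 1) - 1) / s0 + 1;
    }

    int main() {
        printf("%d\n", conv_1d_out_len(3000, 3, 1, 1, 1)); // "same" padding: 3000
        printf("%d\n", conv_1d_out_len(3000, 3, 2, 1, 1)); // stride 2: 1500
        return 0;
    }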
@@ -1498,20 +1522,19 @@ extern "C" {
 #else
 #define GGML_RESTRICT restrict
 #endif
-    typedef void (*dequantize_row_q_t)(const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
-    typedef void (*quantize_row_q_t)  (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int k);
-    typedef void (*vec_dot_q_t)       (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);
+    typedef void (*ggml_to_float_t)  (const void * GGML_RESTRICT x, float * GGML_RESTRICT y, int k);
+    typedef void (*ggml_from_float_t)(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int k);
+    typedef void (*ggml_vec_dot_t)   (const int n, float * GGML_RESTRICT s, const void * GGML_RESTRICT x, const void * GGML_RESTRICT y);
 
     typedef struct {
-        dequantize_row_q_t dequantize_row_q;
-        quantize_row_q_t   quantize_row_q;
-        quantize_row_q_t   quantize_row_q_reference;
-        quantize_row_q_t   quantize_row_q_dot;
-        vec_dot_q_t        vec_dot_q;
+        ggml_to_float_t   to_float;
+        ggml_from_float_t from_float;
+        ggml_from_float_t from_float_reference;
+        ggml_vec_dot_t    vec_dot;
         enum ggml_type    vec_dot_type;
-    } quantize_fns_t;
+    } ggml_type_traits_t;
 
-    quantize_fns_t ggml_internal_get_quantize_fn(size_t i);
+    ggml_type_traits_t ggml_internal_get_type_traits(enum ggml_type i);
 
 #ifdef __cplusplus
 }
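The rename turns the quantization function table into general per-type traits: any ggml_type can now expose conversion and dot-product callbacks, plus the companion type (vec_dot_type) in which its vec_dot expects the second operand. A hedged usage sketch (assumes this commit's ggml.h and a buffer qbuf holding k elements of Q4_0 data):

    // Sketch: dequantize a q4_0 row through the new type-traits table.
    #include "ggml.h"

    void dequantize_row(const void * qbuf, float * out, int k) {
        ggml_type_traits_t traits = ggml_internal_get_type_traits(GGML_TYPE_Q4_0);
        traits.to_float(qbuf, out, k); // was: quantize_fns.dequantize_row_q
        // traits.vec_dot_type names the type the activations must be
        // quantized to before calling traits.vec_dot.
    }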
@@ -78,6 +78,8 @@ static size_t mem_per_token = 0;
 static std::vector<float> logits;
 static std::vector<int> smartcontext;
 static std::vector<std::string> stop_sequence;
+static std::vector<std::string> banned_tokens;
+static std::vector<int> banned_token_ids;
 static std::vector<llama_token_data> top_picks;
 static int remaining_tokens = 0;
 static int stopper_unused_tokens = 0;
@@ -221,8 +223,31 @@ void sample_top_a(llama_token_data_array * candidates, float a, size_t min_keep)
     candidates->size = last_idx;
 }
 
+void sample_rep_pen(int n_ctx, int rep_pen_range, float rep_pen, llama_token_data_array * candidates_p)
+{
+    auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), rep_pen_range), n_ctx);
+    llama_sample_repetition_penalty(nullptr, candidates_p,
+        last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
+        last_n_repeat, rep_pen);
+}
+
+void sample_temperature(llama_token_data_array * candidates_p, float temp)
+{
+    if (temp <= 0)
+    {
+        // Imitate greedy sampling
+        temp = 0.01f; //cannot be zero else div0
+        llama_sample_temperature(nullptr, candidates_p, temp);
+        llama_sample_top_k(nullptr, candidates_p, 1, 1); //only want first candidate
+    }
+    else
+    {
+        llama_sample_temperature(nullptr, candidates_p, temp);
+    }
+}
+
 int SampleLogits(const float * logits, int n_ctx, int n_vocab, int rep_pen_range, float rep_pen, float top_k, float top_a, float top_p, float typical_p, float tfs, float temp, std::mt19937 & rng,
-int mirostat, float mirostat_tau, float mirostat_eta)
+int mirostat, float mirostat_tau, float mirostat_eta, const std::vector<samplers> & sampler_order)
 {
     int id = 0;
     std::vector<llama_token_data> candidates;
@@ -233,48 +258,55 @@ int mirostat, float mirostat_tau, float mirostat_eta)
 
     llama_token_data_array candidates_p = { candidates.data(), candidates.size(), false };
 
-    // Apply penalties
-    auto last_n_repeat = std::min(std::min((int)last_n_tokens.size(), rep_pen_range), n_ctx);
-    llama_sample_repetition_penalty(nullptr, &candidates_p,
-        last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
-        last_n_repeat, rep_pen);
-
-    // llama_sample_frequency_and_presence_penalties(nullptr, &candidates_p,
-    //    last_n_tokens.data() + last_n_tokens.size() - last_n_repeat,
-    //    last_n_repeat, alpha_frequency, alpha_presence);
-
-    if (temp <= 0)
-    {
-        // Greedy sampling
-        id = llama_sample_token_greedy(nullptr, &candidates_p);
-    }
-    else
-    {
-        if (mirostat == 1)
-        {
-            static float mirostat_mu = 2.0f * mirostat_tau;
-            const int mirostat_m = 100;
-            llama_sample_temperature(nullptr, &candidates_p, temp);
-            id = sample_token_mirostat(n_vocab, &candidates_p, rng, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
-        }
-        else if (mirostat == 2)
-        {
-            static float mirostat_mu = 2.0f * mirostat_tau;
-            llama_sample_temperature(nullptr, &candidates_p, temp);
-            id = sample_token_mirostat_v2(&candidates_p, rng, mirostat_tau, mirostat_eta, &mirostat_mu);
-        }
-        else
-        {
-            // Temperature sampling
-            llama_sample_top_k(nullptr, &candidates_p, top_k,1);
-            sample_top_a(&candidates_p,top_a,1);
-            llama_sample_tail_free(nullptr, &candidates_p, tfs,1);
-            llama_sample_typical(nullptr, &candidates_p, typical_p,1);
-            llama_sample_top_p(nullptr, &candidates_p, top_p,1);
-            llama_sample_temperature(nullptr, &candidates_p, temp);
-            id = sample_token(&candidates_p, rng);
-        }
-    }
+    if (mirostat == 1 || mirostat == 2)
+    {
+        static float mirostat_mu = 2.0f * mirostat_tau;
+        const int mirostat_m = 100;
+        sample_rep_pen(n_ctx, rep_pen_range, rep_pen, &candidates_p);
+        sample_temperature(&candidates_p, temp);
+        if (mirostat == 1)
+        {
+            id = sample_token_mirostat(n_vocab, &candidates_p, rng, mirostat_tau, mirostat_eta, mirostat_m, &mirostat_mu);
+        }
+        else
+        {
+            id = sample_token_mirostat_v2(&candidates_p, rng, mirostat_tau, mirostat_eta, &mirostat_mu);
+        }
+    }
+    else
+    {
+        for (int i = 0; i < sampler_order.size(); i++)
+        {
+            switch (sampler_order[i])
+            {
+                case KCPP_SAMPLER_TOP_K:
+                    llama_sample_top_k(nullptr, &candidates_p, top_k,1);
+                    break;
+                case KCPP_SAMPLER_TOP_A:
+                    sample_top_a(&candidates_p,top_a,1);
+                    break;
+                case KCPP_SAMPLER_TOP_P:
+                    llama_sample_top_p(nullptr, &candidates_p, top_p,1);
+                    break;
+                case KCPP_SAMPLER_TFS:
+                    llama_sample_tail_free(nullptr, &candidates_p, tfs,1);
+                    break;
+                case KCPP_SAMPLER_TYP:
+                    llama_sample_typical(nullptr, &candidates_p, typical_p,1);
+                    break;
+                case KCPP_SAMPLER_TEMP:
+                    sample_temperature(&candidates_p, temp);
+                    break;
+                case KCPP_SAMPLER_REP_PEN:
+                    sample_rep_pen(n_ctx, rep_pen_range, rep_pen, &candidates_p);
+                    break;
+                default:
+                    printf("\nSampleLogits: Unknown Sampler : %d",sampler_order[i]);
+                    break;
+            }
+        }
+        id = sample_token(&candidates_p, rng);
+    }
 
     return id;
 }
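The default order applies the repetition penalty first and temperature last, the conventional arrangement; exposing it as a list lets a client, for example, apply temperature before top-p. A sketch of the enum shape this code assumes (the KCPP_SAMPLER_* names appear in the switch above; the concrete values here are illustrative, the real definition lives elsewhere in the fork):

    // Assumed shape of the sampler enum used above (values illustrative).
    enum samplers {
        KCPP_SAMPLER_TOP_K,
        KCPP_SAMPLER_TOP_A,
        KCPP_SAMPLER_TOP_P,
        KCPP_SAMPLER_TFS,
        KCPP_SAMPLER_TYP,
        KCPP_SAMPLER_TEMP,
        KCPP_SAMPLER_REP_PEN,
    };
    // Default order requested later in gpttype_generate:
    // REP_PEN, TOP_K, TOP_A, TFS, TYP, TOP_P, TEMP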
|
@ -316,10 +348,30 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
|
||||||
= gpt2_ctx_v1.hparams.n_ctx = gpt2_ctx_v2.hparams.n_ctx = gpt2_ctx_v3.hparams.n_ctx
|
= gpt2_ctx_v1.hparams.n_ctx = gpt2_ctx_v2.hparams.n_ctx = gpt2_ctx_v3.hparams.n_ctx
|
||||||
= mpt_ctx_v3.hparams.n_ctx = params.n_ctx;
|
= mpt_ctx_v3.hparams.n_ctx = params.n_ctx;
|
||||||
|
|
||||||
|
//handle custom token bans
|
||||||
|
banned_tokens.clear();
|
||||||
|
for(int x=0;x<ban_token_max;++x)
|
||||||
|
{
|
||||||
|
std::string word = inputs.banned_tokens[x];
|
||||||
|
if(word!="")
|
||||||
|
{
|
||||||
|
banned_tokens.push_back(word);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
//this is used for the mem_per_token eval, openblas needs more RAM
|
//this is used for the mem_per_token eval, openblas needs more RAM
|
||||||
bool use_scratch = ggml_cpu_has_gpublas();
|
bool use_scratch = ggml_cpu_has_gpublas();
|
||||||
|
|
||||||
|
int cu_parseinfo_maindevice = inputs.cublas_info<0?0:inputs.cublas_info;
|
||||||
|
|
||||||
printf("System Info: %s\n", llama_print_system_info());
|
printf("System Info: %s\n", llama_print_system_info());
|
||||||
|
#if defined(GGML_USE_CUBLAS)
|
||||||
|
if(ggml_cpu_has_gpublas() && cu_parseinfo_maindevice>0)
|
||||||
|
{
|
||||||
|
printf("CUBLAS: Set main device to %d\n",cu_parseinfo_maindevice);
|
||||||
|
ggml_cuda_set_main_device(cu_parseinfo_maindevice);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
SetQuantsUnshuffled(false);
|
SetQuantsUnshuffled(false);
|
||||||
if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2)
|
if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2)
|
||||||
{
|
{
|
||||||
|
@@ -384,6 +436,7 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
         llama_ctx_params.use_mmap = inputs.use_mmap;
         llama_ctx_params.use_mlock = inputs.use_mlock;
         llama_ctx_params.n_gpu_layers = inputs.gpulayers;
+        llama_ctx_params.main_gpu = cu_parseinfo_maindevice;
 
         llama_ctx_v3 = llama_init_from_file(modelname.c_str(), llama_ctx_params);
@@ -434,10 +487,10 @@ ModelLoadResult gpttype_load_model(const load_model_inputs inputs, FileFormat in
     {
         rwkv_ctx_v3 = rwkv_init_from_file(modelname.c_str(), n_threads);
 
-        // if(inputs.gpulayers>0)
-        // {
-        //     rwkv_gpu_offload_layers(rwkv_ctx_v3,inputs.gpulayers);
-        // }
+        if(inputs.gpulayers>0)
+        {
+            rwkv_gpu_offload_layers(rwkv_ctx_v3,inputs.gpulayers);
+        }
 
         const struct rwkv_file_header & header = rwkv_ctx_v3->instance->model.header;
         const size_t n_vocab = header.n_vocab;
@@ -917,6 +970,28 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
     std::mt19937 rng(params.seed);
     concat_output = "";
 
+    //prepare sampler order
+    std::vector<samplers> sampler_order;
+    if(inputs.sampler_len<=0) //list by value
+    {
+        sampler_order = {
+            KCPP_SAMPLER_REP_PEN,
+            KCPP_SAMPLER_TOP_K,
+            KCPP_SAMPLER_TOP_A,
+            KCPP_SAMPLER_TFS,
+            KCPP_SAMPLER_TYP,
+            KCPP_SAMPLER_TOP_P,
+            KCPP_SAMPLER_TEMP
+        };
+    }
+    else
+    {
+        for(int i=0;i<inputs.sampler_len;++i)
+        {
+            sampler_order.push_back(inputs.sampler_order[i]);
+        }
+    }
+
     bool startedsampling = false;
     bool use_scratch = true; //for normal inference always use scratch
@@ -1006,6 +1081,25 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
             printf("Bad format!");
         }
 
+    //prepare banned tokens
+    if(banned_token_ids.size()==0 && banned_tokens.size()>0)
+    {
+        printf("\n[First Run] Banning %d token sequences...",banned_tokens.size());
+        for(int v=0;v<n_vocab;++v)
+        {
+            std::string word = FileFormatTokenizeID(v,file_format);
+            for(int i=0;i<banned_tokens.size();++i)
+            {
+                if (word.find(banned_tokens[i]) != std::string::npos)
+                {
+                    banned_token_ids.push_back(v);
+                    break;
+                }
+            }
+        }
+        printf("\nBanned a total of %d tokens.\n",banned_token_ids.size());
+    }
+
     if(debugmode!=-1)
     {
         printf("\n");
|
||||||
}
|
}
|
||||||
else
|
else
|
||||||
{
|
{
|
||||||
// if(embd.size()>1)
|
if(embd.size()>1)
|
||||||
// {
|
{
|
||||||
// evalres = rwkv_eval_sequence(rwkv_ctx_v3, (uint32_t*)embd.data(), embd.size(), rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, rwkv_ctx_v3->logits_out);
|
evalres = rwkv_eval_sequence(rwkv_ctx_v3, (uint32_t*)embd.data(), embd.size(), rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, rwkv_ctx_v3->logits_out);
|
||||||
// }
|
}
|
||||||
// else
|
else
|
||||||
// {
|
{
|
||||||
bool ignoreLogits = (!startedsampling && ((int)embd_inp.size() > input_consumed + 2));
|
bool ignoreLogits = (!startedsampling && ((int)embd_inp.size() > input_consumed + 2));
|
||||||
evalres = rwkv_eval(rwkv_ctx_v3, embd[0], rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, ignoreLogits?nullptr:rwkv_ctx_v3->logits_out);
|
evalres = rwkv_eval(rwkv_ctx_v3, embd[0], rwkv_ctx_v3->state_in, rwkv_ctx_v3->state_out, ignoreLogits?nullptr:rwkv_ctx_v3->logits_out);
|
||||||
//}
|
}
|
||||||
|
|
||||||
memcpy(logits.data(), rwkv_ctx_v3->logits_out, sizeof(float) * rwkv_vocab.size());
|
memcpy(logits.data(), rwkv_ctx_v3->logits_out, sizeof(float) * rwkv_vocab.size());
|
||||||
rwkv_ctx_v3->state_in = rwkv_ctx_v3->state_out;
|
rwkv_ctx_v3->state_in = rwkv_ctx_v3->state_out;
|
||||||
|
@ -1163,6 +1257,7 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
|
||||||
|
|
||||||
unsigned int eosID = 0;
|
unsigned int eosID = 0;
|
||||||
float * logitsPtr;
|
float * logitsPtr;
|
||||||
|
int btsize = banned_token_ids.size();
|
||||||
if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2 || file_format == FileFormat::GGJT_3)
|
if(file_format == FileFormat::GGML || file_format == FileFormat::GGHF || file_format == FileFormat::GGJT || file_format == FileFormat::GGJT_2 || file_format == FileFormat::GGJT_3)
|
||||||
{
|
{
|
||||||
if(file_format == FileFormat::GGJT_3)
|
if(file_format == FileFormat::GGJT_3)
|
||||||
|
@@ -1181,6 +1276,14 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
                         // set the logit of the eos token (2) to zero to avoid sampling it
                         logitsPtr[eosID] = 0;
                     }
+
+                    if(btsize>0)
+                    {
+                        for(int t=0;t<btsize;++t)
+                        {
+                            logitsPtr[banned_token_ids[t]]=0;
+                        }
+                    }
                 }
                 else
                 {
@@ -1235,11 +1338,19 @@ generation_outputs gpttype_generate(const generation_inputs inputs, generation_o
             }
         }

+        if(btsize>0)
+        {
+            int topid = std::min_element(logits.begin(), logits.end()) - logits.begin();
+            for (int t = 0; t < btsize; ++t)
+            {
+                logits[banned_token_ids[t]] = (logits[topid] < 0 ? logits[topid] : 0);
+            }
+        }
     }

     id = SampleLogits(logitsPtr, nctx, n_vocab, last_n_size, repeat_penalty,
                     top_k, top_a, top_p, typical_p, tfs_z, temp, rng,
-                    params.mirostat,params.mirostat_tau,params.mirostat_eta);
+                    params.mirostat, params.mirostat_tau, params.mirostat_eta, sampler_order);

     last_n_tokens.erase(last_n_tokens.begin());
     last_n_tokens.push_back(id);
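Both branches above implement token banning by flooring the banned entries of the raw logit vector before sampling: the llama path writes 0, while the generic path writes the smallest logit found (or 0 if all logits are positive) so the trick also works when scores can be negative. A rough numpy equivalent of the second variant, a sketch assuming `logits` is the pre-softmax score vector:

    import numpy as np

    def ban_tokens(logits, banned_ids):
        # Floor banned entries to the global minimum (capped at 0) so they
        # are effectively never chosen by greedy or top-k sampling.
        floor = min(logits.min(), 0.0)
        logits[banned_ids] = floor
        return logits

    logits = np.array([1.5, -0.2, 3.1, 0.4])
    print(ban_tokens(logits, [2]))   # index 2 is suppressed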
19  klite.embd
File diff suppressed because one or more lines are too long

481  koboldcpp.py
@@ -12,6 +12,8 @@ import json, sys, http.server, time, asyncio, socket, threading
 from concurrent.futures import ThreadPoolExecutor

 stop_token_max = 10
+sampler_order_max = 7
+ban_token_max = 10

 class load_model_inputs(ctypes.Structure):
     _fields_ = [("threads", ctypes.c_int),
@@ -29,10 +31,12 @@ class load_model_inputs(ctypes.Structure):
                 ("use_smartcontext", ctypes.c_bool),
                 ("unban_tokens", ctypes.c_bool),
                 ("clblast_info", ctypes.c_int),
+                ("cublas_info", ctypes.c_int),
                 ("blasbatchsize", ctypes.c_int),
                 ("debugmode", ctypes.c_int),
                 ("forceversion", ctypes.c_int),
-                ("gpulayers", ctypes.c_int)]
+                ("gpulayers", ctypes.c_int),
+                ("banned_tokens", ctypes.c_char_p * ban_token_max)]

 class generation_inputs(ctypes.Structure):
     _fields_ = [("seed", ctypes.c_int),
@@ -50,6 +54,8 @@ class generation_inputs(ctypes.Structure):
                 ("mirostat", ctypes.c_int),
                 ("mirostat_tau", ctypes.c_float),
                 ("mirostat_eta", ctypes.c_float),
+                ("sampler_order", ctypes.c_int * sampler_order_max),
+                ("sampler_len", ctypes.c_int),
                 ("stop_sequence", ctypes.c_char_p * stop_token_max),
                 ("stream_sse", ctypes.c_bool)]
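The two new fields marshal the sampler order as a fixed-size C array plus an explicit length, since ctypes arrays cannot be resized per call. A self-contained sketch of how a Python list lands in such a field:

    import ctypes

    sampler_order_max = 7

    class generation_inputs(ctypes.Structure):
        _fields_ = [("sampler_order", ctypes.c_int * sampler_order_max),
                    ("sampler_len", ctypes.c_int)]

    inputs = generation_inputs()
    order = [6, 0, 1, 3, 4, 2, 5]          # default order used elsewhere in this diff
    for i, s in enumerate(order):
        inputs.sampler_order[i] = s        # fixed-size array; unused slots stay 0
    inputs.sampler_len = len(order)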
@@ -108,7 +114,7 @@ def init_library():
     else:
         print("Attempting to use CLBlast library for faster prompt ingestion. A compatible clblast will be required.")
         use_clblast = True
-    elif (args.usecublas and args.usecublas!=""):
+    elif (args.usecublas is not None):
        if not file_exists(lib_cublas):
            print("Warning: CuBLAS library file not found. Non-BLAS library will be used.")
        else:
@@ -165,7 +171,7 @@ def load_model(model_filename):
     inputs.batch_size = 8
     inputs.max_context_length = maxctx #initial value to use for ctx, can be overwritten
     inputs.threads = args.threads
-    inputs.low_vram = (True if args.usecublas=="lowvram" else False)
+    inputs.low_vram = (True if (args.usecublas and "lowvram" in args.usecublas) else False)
     inputs.blasthreads = args.blasthreads
     inputs.f16_kv = True
     inputs.use_mmap = (not args.nommap)
@@ -186,12 +192,25 @@ def load_model(model_filename):
     if args.useclblast:
         clblastids = 100 + int(args.useclblast[0])*10 + int(args.useclblast[1])
         inputs.clblast_info = clblastids
+    inputs.cublas_info = 0
+    if (args.usecublas and "0" in args.usecublas):
+        os.environ["CUDA_VISIBLE_DEVICES"] = "0"
+    elif (args.usecublas and "1" in args.usecublas):
+        os.environ["CUDA_VISIBLE_DEVICES"] = "1"
+    elif (args.usecublas and "2" in args.usecublas):
+        os.environ["CUDA_VISIBLE_DEVICES"] = "2"
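Setting CUDA_VISIBLE_DEVICES here works because the variable is read when the CUDA runtime first initializes, which only happens after the backend DLL is loaded; changing it afterwards has no effect. A minimal illustration of the ordering constraint:

    import os

    # Must happen before any CUDA-backed library is loaded or initialized;
    # the runtime snapshots this variable once, at startup.
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"   # physical GPU 1 becomes device 0
    # ...only now load the CUDA-enabled shared library...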
     inputs.executable_path = (getdirpath()+"/").encode("UTF-8")
     inputs.debugmode = args.debugmode
+    banned_tokens = args.bantokens
+    for n in range(ban_token_max):
+        if not banned_tokens or n >= len(banned_tokens):
+            inputs.banned_tokens[n] = "".encode("UTF-8")
+        else:
+            inputs.banned_tokens[n] = banned_tokens[n].encode("UTF-8")
     ret = handle.load_model(inputs)
     return ret
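A possible invocation wiring this together (the model filename is illustrative); each argument to --bantokens is a substring, and per the help text later in this diff every token containing it is banned:

    python koboldcpp.py model.bin --bantokens "[" "RPG"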
-def generate(prompt,max_length=20, max_context_length=512,temperature=0.8,top_k=120, top_a=0.0 ,top_p=0.85, typical_p=1.0, tfs=1.0 ,rep_pen=1.1,rep_pen_range=128,seed=-1,stop_sequence=[],stream_sse=False):
+def generate(prompt,max_length=20, max_context_length=512, temperature=0.8, top_k=120, top_a=0.0, top_p=0.85, typical_p=1.0, tfs=1.0, rep_pen=1.1, rep_pen_range=128, mirostat=0, mirostat_tau=5.0, mirostat_eta=0.1, sampler_order=[6,0,1,3,4,2,5], seed=-1, stop_sequence=[], stream_sse=False):
     inputs = generation_inputs()
     outputs = ctypes.create_unicode_buffer(ctypes.sizeof(generation_outputs))
     inputs.prompt = prompt.encode("UTF-8")
@@ -210,8 +229,19 @@ def generate(prompt,max_length=20, max_context_length=512,temperature=0.8,top_k=
         inputs.mirostat = int(args.usemirostat[0])
         inputs.mirostat_tau = float(args.usemirostat[1])
         inputs.mirostat_eta = float(args.usemirostat[2])
+    elif mirostat in (1, 2):
+        inputs.mirostat = mirostat
+        inputs.mirostat_tau = mirostat_tau
+        inputs.mirostat_eta = mirostat_eta
     else:
         inputs.mirostat = inputs.mirostat_tau = inputs.mirostat_eta = 0
+    if sampler_order and 0 < len(sampler_order) <= sampler_order_max:
+        try:
+            for i, sampler in enumerate(sampler_order):
+                inputs.sampler_order[i] = sampler
+            inputs.sampler_len = len(sampler_order)
+        except TypeError as e:
+            print("ERROR: sampler_order must be a list of integers: " + str(e))
     inputs.seed = seed
     for n in range(stop_token_max):
         if not stop_sequence or n >= len(stop_sequence):
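A sketch of a direct call exercising the new parameters, assuming load_model() has already succeeded; the sampler IDs follow the KoboldAI convention used by the [6,0,1,3,4,2,5] default (CLI --usemirostat, when set, still takes precedence inside the function):

    # Hypothetical direct call; in normal use the HTTP server builds these args.
    text = generate("Once upon a time,",
                    max_length=64,
                    temperature=0.7,
                    mirostat=2, mirostat_tau=5.0, mirostat_eta=0.1,
                    sampler_order=[6, 0, 1, 3, 4, 2, 5])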
@@ -241,7 +271,7 @@ maxhordectx = 1024
 maxhordelen = 256
 modelbusy = False
 defaultport = 5001
-KcppVersion = "1.34"
+KcppVersion = "1.34.2"
 showdebug = True

 class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
@@ -277,6 +307,10 @@ class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
                     tfs=genparams.get('tfs', 1.0),
                     rep_pen=genparams.get('rep_pen', 1.1),
                     rep_pen_range=genparams.get('rep_pen_range', 128),
+                    mirostat=genparams.get('mirostat', 0),
+                    mirostat_tau=genparams.get('mirostat_tau', 5.0),
+                    mirostat_eta=genparams.get('mirostat_eta', 0.1),
+                    sampler_order=genparams.get('sampler_order', [6,0,1,3,4,2,5]),
                     seed=genparams.get('sampler_seed', -1),
                     stop_sequence=genparams.get('stop_sequence', []),
                     stream_sse=stream_flag)
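With these genparams keys wired through, a client can set the new sampler fields per request. A hedged sketch of such a request (the endpoint path is the standard KoboldAI generate route, assumed here; port 5001 is the defaultport above):

    import requests

    payload = {
        "prompt": "Hello",
        "max_length": 32,
        "mirostat": 0,
        "sampler_order": [6, 0, 1, 3, 4, 2, 5],
        "sampler_seed": -1,
    }
    r = requests.post("http://localhost:5001/api/v1/generate", json=payload)
    print(r.json())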
@@ -293,6 +327,10 @@ class ServerRequestHandler(http.server.SimpleHTTPRequestHandler):
                     tfs=genparams.get('tfs', 1.0),
                     rep_pen=genparams.get('rep_pen', 1.1),
                     rep_pen_range=genparams.get('rep_pen_range', 128),
+                    mirostat=genparams.get('mirostat', 0),
+                    mirostat_tau=genparams.get('mirostat_tau', 5.0),
+                    mirostat_eta=genparams.get('mirostat_eta', 0.1),
+                    sampler_order=genparams.get('sampler_order', [6,0,1,3,4,2,5]),
                     seed=genparams.get('sampler_seed', -1),
                     stop_sequence=genparams.get('stop_sequence', []),
                     stream_sse=stream_flag)
@@ -569,10 +607,413 @@ def RunServerMultiThreaded(addr, port, embedded_kailite = None):
             threadArr[i].stop()
         sys.exit(0)

+# note: customtkinter-5.2.0
+def show_new_gui():
+    import customtkinter as ctk
+    from tkinter.filedialog import askopenfilename
+    from tkinter.filedialog import asksaveasfile
+
-def show_gui():
+    # if args received, launch
+    if len(sys.argv) != 1:
+        root = ctk.CTk()
+        #we dont want the useless window to be visible, but we want it in taskbar
+        root.attributes("-alpha", 0)
+        args.model_param = askopenfilename(title="Select ggml model .bin files")
+        root.destroy()
+        if not args.model_param:
+            print("\nNo ggml model file was selected. Exiting.")
+            time.sleep(2)
+            sys.exit(2)
+        return
+
+    nextstate = 0 #0=exit, 1=launch, 2=oldgui
+    windowwidth = 520
+    windowheight = 500
+    ctk.set_appearance_mode("dark")
+    root = ctk.CTk()
+    root.geometry(str(windowwidth) + "x" + str(windowheight))
+    root.title("KoboldCpp v"+KcppVersion)
+    root.resizable(False,False)
+
+    tabs = ctk.CTkFrame(root, corner_radius = 0, width=windowwidth, height=windowheight-50)
+    tabs.grid(row=0, stick="nsew")
+    tabnames= ["Quick Launch", "Hardware", "Tokens", "Model", "Network"]
+    navbuttons = {}
+    navbuttonframe = ctk.CTkFrame(tabs, width=100, height=int(tabs.cget("height")))
+    navbuttonframe.grid(row=0, column=0, padx=2,pady=2)
+    navbuttonframe.grid_propagate(False)
+
+    tabcontentframe = ctk.CTkFrame(tabs, width=windowwidth - int(navbuttonframe.cget("width")), height=int(tabs.cget("height")))
+    tabcontentframe.grid(row=0, column=1, sticky="nsew", padx=2, pady=2)
+    tabcontentframe.grid_propagate(False)
+
+    tabcontent = {}
+
+    # slider data
+    blasbatchsize_values = ["-1", "32", "64", "128", "256", "512", "1024"]
+    blasbatchsize_text = ["Don't Batch BLAS","32","64","128","256","512","1024"]
+    contextsize_text = ["512", "1024", "2048", "3072", "4096", "6144", "8192"]
+    runopts = ["Use OpenBLAS","Use CLBlast", "Use CuBLAS", "Use No BLAS","Use OpenBLAS (Old CPU, noavx2)","Failsafe Mode (Old CPU, noavx)"]
+
+    def tabbuttonaction(name):
+        for t in tabcontent:
+            if name == t:
+                tabcontent[t].grid(row=0, column=0)
+                navbuttons[t].configure(fg_color="#6f727b")
+            else:
+                tabcontent[t].grid_forget()
+                navbuttons[t].configure(fg_color="transparent")
+
+    # Dynamically create tabs + buttons based on values of [tabnames]
+    for idx, name in enumerate(tabnames):
+        tabcontent[name] = ctk.CTkFrame(tabcontentframe, width=int(tabcontentframe.cget("width")), height=int(tabcontentframe.cget("height")), fg_color="transparent")
+        tabcontent[name].grid_propagate(False)
+        if idx == 0:
+            tabcontent[name].grid(row=idx, sticky="nsew")
+        ctk.CTkLabel(tabcontent[name], text= name, font=ctk.CTkFont(None, 14, 'bold')).grid(row=0, padx=12, pady = 5, stick='nw')
+
+        navbuttons[name] = ctk.CTkButton(navbuttonframe, text=name, width = 100, corner_radius=0 , command = lambda d=name:tabbuttonaction(d), hover_color="#868a94" )
+        navbuttons[name].grid(row=idx)
+
+    tabbuttonaction(tabnames[0])
+
+    # helper functions
+    def makecheckbox(parent, text, variable=None, row=0, column=0, command=None, onvalue=1, offvalue=0):
+        temp = ctk.CTkCheckBox(parent, text=text,variable=variable, onvalue=onvalue, offvalue=offvalue)
+        if command is not None and variable is not None:
+            variable.trace("w", command)
+        temp.grid(row=row,column=column, padx=8, pady=1, stick="nw")
+        return temp
+
+    def makelabel(parent, text, row, column=0):
+        temp = ctk.CTkLabel(parent, text=text)
+        temp.grid(row=row, column=column, padx=8, pady=1, stick="nw")
+        return temp
+
+    def makeslider(parent, label, options, var, from_ , to, row=0, width=160, height=10, set=0):
+        sliderLabel = makelabel(parent, options[set], row + 1, 1)
+        makelabel(parent, label, row)
+
+        def sliderUpdate(a,b,c):
+            sliderLabel.configure(text = options[int(var.get())])
+        var.trace("w", sliderUpdate)
+        slider = ctk.CTkSlider(parent, from_=from_, to=to, variable = var, width = width, height=height, border_width=5,number_of_steps=len(options) - 1)
+        slider.grid(row=row+1, column=0, padx = 8, stick="w")
+        slider.set(set)
+        return slider
+
+    def makelabelentry(parent, text, var, row=0, width= 50):
+        label = makelabel(parent, text, row)
+        entry = ctk.CTkEntry(parent, width=width, textvariable=var) #you cannot set placeholder text for SHARED variables
+        entry.grid(row=row, column=1, padx= 8, stick="nw")
+        return entry, label
+
+    def makefileentry(parent, text, searchtext, var, row=0, width=250):
+        makelabel(parent, text, row)
+        def getfilename(var, text):
+            var.set(askopenfilename(title=text))
+        entry = ctk.CTkEntry(parent, width, textvariable=var)
+        entry.grid(row=row+1, column=0, padx=8, stick="nw")
+        button = ctk.CTkButton(parent, 50, text="Browse", command= lambda a=var,b=searchtext:getfilename(a,b))
+        button.grid(row=row+1, column=1, stick="nw")
+        return
+
+    # Vars - should be in scope to be used by multiple widgets
+    gpulayers_var = ctk.StringVar(value="0")
+    threads_var = ctk.StringVar(value=str(default_threads))
+    runopts_var = ctk.StringVar()
+    gpu_choice_var = ctk.StringVar(value="1")
+
+    launchbrowser = ctk.IntVar(value=1)
+    highpriority = ctk.IntVar()
+    disablemmap = ctk.IntVar()
+    psutil = ctk.IntVar()
+    usemlock = ctk.IntVar()
+    debugmode = ctk.IntVar()
+
+    lowvram_var = ctk.IntVar()
+
+    blas_threads_var = ctk.StringVar()
+    blas_size_var = ctk.IntVar()
+    version_var =ctk.StringVar(value="0")
+
+    stream = ctk.IntVar()
+    smartcontext = ctk.IntVar()
+    unbantokens = ctk.IntVar()
+    usemirostat = ctk.IntVar()
+    mirostat_var = ctk.StringVar(value="2")
+    mirostat_tau = ctk.StringVar(value="5.0")
+    mirostat_eta = ctk.StringVar(value="0.1")
+
+    context_var = ctk.IntVar()
+
+    model_var = ctk.StringVar()
+    lora_var = ctk.StringVar()
+    lora_base_var = ctk.StringVar()
+
+    port_var = ctk.StringVar(value=defaultport)
+    host_var = ctk.StringVar(value="")
+    horde_name_var = ctk.StringVar(value="koboldcpp")
+    horde_gen_var = ctk.StringVar(value=maxhordelen)
+    horde_context_var = ctk.StringVar(value=maxhordectx)
+    usehorde_var = ctk.IntVar()
+
+    # Quick Launch Tab
+    quick_tab = tabcontent["Quick Launch"]
+
+    # gpu options
+    quick_gpu_layers_entry,quick_gpu_layers_label = makelabelentry(quick_tab,"GPU Layers:", gpulayers_var, 4, 50)
+    quick_gpu_selector_label = makelabel(quick_tab, "GPU ID:", 3)
+    quick_gpu_selector_box = ctk.CTkComboBox(quick_tab, values=["1","2","3"], width=60, variable=gpu_choice_var, state="readonly")
+    quick_lowvram_box = makecheckbox(quick_tab, "Low VRAM", lowvram_var, 5)
+
+    # hides gpu options when CLBlast is not chosen
+    def changerunmode(a,b,c):
+        index = runopts_var.get()
+        if index == "Use CLBlast" or index == "Use CuBLAS":
+            gpu_selector_label.grid(row=3, column=0, padx = 8, pady=1, stick="nw")
+            gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
+            quick_gpu_selector_label.grid(row=3, column=0, padx = 8, pady=1, stick="nw")
+            quick_gpu_selector_box.grid(row=3, column=1, padx=8, pady=1, stick="nw")
+        else:
+            gpu_selector_label.grid_forget()
+            gpu_selector_box.grid_forget()
+            quick_gpu_selector_label.grid_forget()
+            quick_gpu_selector_box.grid_forget()
+
+        if index == "Use CuBLAS":
+            lowvram_box.grid(row=4, column=0, padx=8, pady=1, stick="nw")
+            quick_lowvram_box.grid(row=4, column=0, padx=8, pady=1, stick="nw")
+        else:
+            lowvram_box.grid_forget()
+            quick_lowvram_box.grid_forget()
+
+        if index == "Use CLBlast" or index == "Use CuBLAS":
+            gpu_layers_label.grid(row=5, column=0, padx = 8, pady=1, stick="nw")
+            gpu_layers_entry.grid(row=5, column=1, padx=8, pady=1, stick="nw")
+            quick_gpu_layers_label.grid(row=5, column=0, padx = 8, pady=1, stick="nw")
+            quick_gpu_layers_entry.grid(row=5, column=1, padx=8, pady=1, stick="nw")
+        else:
+            gpu_layers_label.grid_forget()
+            gpu_layers_entry.grid_forget()
+            quick_gpu_layers_label.grid_forget()
+            quick_gpu_layers_entry.grid_forget()
+
+    # presets selector
+    makelabel(quick_tab, "Presets:", 1)
+
+    runoptbox = ctk.CTkComboBox(quick_tab, values=runopts, width=180,variable=runopts_var, state="readonly")
+    runoptbox.grid(row=1, column=1,padx=8, stick="nw")
+    runoptbox.set("Use OpenBLAS")
+
+    # threads
+    makelabelentry(quick_tab, "Threads:" , threads_var, 8, 50)
+
+    # blas batch size
+    makeslider(quick_tab, "BLAS Batch Size:", blasbatchsize_text, blas_size_var, 0, 6, 12, set=5)
+
+    # quick boxes
+    quick_boxes = {"Launch Browser": launchbrowser , "High Priority" : highpriority, "Streaming Mode":stream, "Use SmartContext":smartcontext, "Unban Tokens":unbantokens, "Disable MMAP":disablemmap,}
+    for idx, name, in enumerate(quick_boxes):
+        makecheckbox(quick_tab, name, quick_boxes[name], int(idx/2) +20, idx%2)
+
+    # context size
+    makeslider(quick_tab, "Context Size:", contextsize_text, context_var, 0, len(contextsize_text)-1, 30, set=2)
+
+    # load model
+    makefileentry(quick_tab, "Model:", "Select GGML Model File", model_var, 40, 170)
+
+    # Hardware Tab
+    hardware_tab = tabcontent["Hardware"]
+
+    # gpu options
+    gpu_layers_entry,gpu_layers_label = makelabelentry(hardware_tab,"GPU Layers:", gpulayers_var, 4, 50)
+    gpu_selector_label = makelabel(hardware_tab, "GPU ID:", 3)
+    gpu_selector_box = ctk.CTkComboBox(hardware_tab, values=["1","2","3"], width=60, variable=gpu_choice_var, state="readonly")
+    lowvram_box = makecheckbox(hardware_tab, "Low VRAM", lowvram_var, 5)
+
+    # presets selector
+    makelabel(hardware_tab, "Presets:", 1)
+    runoptbox = ctk.CTkComboBox(hardware_tab, values=runopts, width=180,variable=runopts_var, state="readonly")
+    runoptbox.grid(row=1, column=1,padx=8, stick="nw")
+    runoptbox.set("Use OpenBLAS")
+    runopts_var.trace('w', changerunmode)
+    changerunmode(1,1,1)
+    # threads
+    makelabelentry(hardware_tab, "Threads:" , threads_var, 8, 50)
+
+    # hardware checkboxes
+    hardware_boxes = {"Launch Browser": launchbrowser , "High Priority" : highpriority, "Disable MMAP":disablemmap, "Use mlock":usemlock, "PSUtil Set Threads":psutil, "Debug Mode":debugmode,}
+
+    for idx, name, in enumerate(hardware_boxes):
+        makecheckbox(hardware_tab, name, hardware_boxes[name], int(idx/2) +30, idx%2)
+
+    # blas thread specifier
+    makelabelentry(hardware_tab, "BLAS threads:" , blas_threads_var, 11, 50)
+    # blas batch size
+    makeslider(hardware_tab, "BLAS Batch Size:", blasbatchsize_text, blas_size_var, 0, 6, 12, set=5)
+    # force version
+    makelabelentry(hardware_tab, "Force Version:" , version_var, 100, 50)
+
+    # Tokens Tab
+    tokens_tab = tabcontent["Tokens"]
+    # tokens checkboxes
+    token_boxes = {"Streaming Mode":stream, "Use SmartContext":smartcontext, "Unban Tokens":unbantokens}
+    for idx, name, in enumerate(token_boxes):
+        makecheckbox(tokens_tab, name, token_boxes[name], idx + 1)
+
+    mirostat_entry, mirostate_label = makelabelentry(tokens_tab, "Mirostat:", mirostat_var)
+    mirostat_tau_entry, mirostat_tau_label = makelabelentry(tokens_tab, "Mirostat Tau:", mirostat_tau)
+    mirostat_eta_entry, mirostat_eta_label = makelabelentry(tokens_tab, "Mirostat Eta:", mirostat_eta)
+    def togglemiro(a,b,c):
+        items = [mirostate_label, mirostat_entry, mirostat_tau_label, mirostat_tau_entry, mirostat_eta_label, mirostat_eta_entry]
+        for idx, item in enumerate(items):
+            if usemirostat.get() == 1:
+                item.grid(row=11 + int(idx/2), column=idx%2, padx=8, stick="nw")
+            else:
+                item.grid_forget()
+
+    makecheckbox(tokens_tab, "Use Mirostat", row=10, variable=usemirostat, command=togglemiro)
+    togglemiro(1,1,1)
+
+    # context size
+    makeslider(tokens_tab, "Context Size:",contextsize_text, context_var, 0, 4, 20, set=2)
+
+    # Model Tab
+    model_tab = tabcontent["Model"]
+
+    makefileentry(model_tab, "Model:", "Select GGML Model File", model_var, 1)
+    makefileentry(model_tab, "Lora:", "Select Lora File",lora_var, 3)
+    makefileentry(model_tab, "Lora Base:", "Select Lora Base File", lora_base_var, 5)
+
+    # Network Tab
+    network_tab = tabcontent["Network"]
+
+    # interfaces
+    makelabelentry(network_tab, "Port: ", port_var, 1, 150)
+    makelabelentry(network_tab, "Host: ", host_var, 2, 150)
+
+    # horde
+    makelabel(network_tab, "Horde:", 3).grid(pady=10)
+
+    horde_name_entry, horde_name_label = makelabelentry(network_tab, "Horde Name:", horde_name_var, 5, 200)
+    horde_gen_entry, horde_gen_label = makelabelentry(network_tab, "Gen. Length:", horde_gen_var, 6, 50)
+    horde_context_entry, horde_context_label = makelabelentry(network_tab, "Max Context:",horde_context_var, 7, 50)
+
+    def togglehorde(a,b,c):
+        labels = [horde_name_label, horde_gen_label, horde_context_label]
+        for idx, item in enumerate([horde_name_entry, horde_gen_entry, horde_context_entry]):
+            if usehorde_var.get() == 1:
+                item.grid(row=5 + idx, column = 1, padx=8, pady=1, stick="nw")
+                labels[idx].grid(row=5 + idx, padx=8, pady=1, stick="nw")
+            else:
+                item.grid_forget()
+                labels[idx].grid_forget()
+
+    usehorde_box = makecheckbox(network_tab, "Configure for Horde", usehorde_var, 4, command=togglehorde)
+    togglehorde(1,1,1)
+
+    # launch
+    def guilaunch():
+        if model_var.get() == "":
+            tmp = askopenfilename(title="Select ggml model .bin files")
+            model_var.set(tmp)
+        nonlocal nextstate
+        nextstate = 1
+        root.destroy()
+        pass
+
+    def switch_old_gui():
+        nonlocal nextstate
+        nextstate = 2
+        root.destroy()
+        pass
+
+    ctk.CTkButton(tabs , text = "Launch", fg_color="#2f8d3c", command = guilaunch, width=80, height = 35 ).grid(row=1,column=1, stick="se", padx= 25, pady=5)
+
+    # ctk.CTkButton(tabs , text = "Save", fg_color="#084a66", command = save_config, width=60, height = 35 ).grid(row=1,column=1, stick="sw", padx= 5, pady=5)
+    # ctk.CTkButton(tabs , text = "Load", fg_color="#084a66", command = load_config, width=60, height = 35 ).grid(row=1,column=1, stick="sw", padx= 70, pady=5)
+
+    ctk.CTkButton(tabs , text = "Old GUI", fg_color="#084a66", command = switch_old_gui, width=100, height = 35 ).grid(row=1,column=0, stick="sw", padx= 5, pady=5)
+    # runs main loop until closed or launch clicked
+    root.mainloop()
+
+    if nextstate==0:
+        print("Exiting by user request.")
+        time.sleep(2)
+        sys.exit()
+    elif nextstate==2:
+        time.sleep(0.1)
+        show_old_gui()
+    else:
+        # processing vars
+        args.threads = int(threads_var.get())
+
+        args.usemlock = usemlock.get() == 1
+        args.debugmode = debugmode.get() == 1
+        args.launch = launchbrowser.get()==1
+        args.highpriority = highpriority.get()==1
+        args.nommap = disablemmap.get()==1
+        args.psutil_set_threads = psutil.get()==1
+        args.stream = stream.get()==1
+        args.smartcontext = smartcontext.get()==1
+        args.unbantokens = unbantokens.get()==1
+
+        gpuchoiceidx = int(gpu_choice_var.get())-1
+        if runopts_var.get() == runopts[1]:
+            args.useclblast = [[0,0], [1,0], [0,1]][gpuchoiceidx]
+        if runopts_var.get() == runopts[2]:
+            args.usecublas = ["lowvram",str(gpuchoiceidx)] if lowvram_var.get() == 1 else ["normal",str(gpuchoiceidx)]
+        if gpulayers_var.get():
+            args.gpulayers = int(gpulayers_var.get())
+        if runopts_var.get()==runopts[3]:
+            args.noblas = True
+        if runopts_var.get()==runopts[4]:
+            args.noavx2 = True
+        if runopts_var.get()==runopts[5]:
+            args.noavx2 = True
+            args.noblas = True
+            args.nommap = True
+            print("[Failsafe Mode : mmap is disabled.]")
+
+        args.blasthreads = None if blas_threads_var.get()=="" else int(blas_threads_var.get())
+
+        args.blasbatchsize = int(blasbatchsize_values[int(blas_size_var.get())])
+        args.forceversion = 0 if version_var.get()=="" else int(version_var.get())
+
+        args.mirostat = [int(mirostat_var.get()), float(mirostat_tau.get()), float(mirostat_eta.get())] if usemirostat.get()==1 else None
+        args.contextsize = int(contextsize_text[context_var.get()])
+
+        args.model_param = None if model_var.get() == "" else model_var.get()
+        args.lora = None if lora_var.get() == "" else ([lora_var.get()] if lora_base_var.get()=="" else [lora_var.get(), lora_base_var.get()])
+
+        args.port_param = defaultport if port_var.get()=="" else int(port_var.get())
+        args.host = host_var.get()
+
+        args.hordeconfig = None if usehorde_var.get() == 0 else [horde_name_var.get(), horde_gen_var.get(), horde_context_var.get()]
+
+        if not args.model_param:
+            print("\nNo ggml model file was selected. Exiting.")
+            time.sleep(2)
+            sys.exit(2)
+
+def show_gui_warning():
+    from tkinter import messagebox
+    import tkinter as tk
+    root = tk.Tk()
+    root.attributes("-alpha", 0)
+    messagebox.showerror(title="New GUI failed, using Old GUI", message="The new GUI failed to load.\n\nTo use new GUI, please install the customtkinter python module.")
+    root.destroy()
+
+def show_old_gui():
     import tkinter as tk
     from tkinter.filedialog import askopenfilename
+    from tkinter import messagebox

     if len(sys.argv) == 1:
         #no args passed at all. Show nooby gui
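The new GUI leans heavily on Variable.trace("w", ...) callbacks (see makecheckbox, makeslider, and changerunmode above). On current Python/Tk the equivalent, non-deprecated spelling is trace_add; a standalone sketch of the same reactive pattern:

    import tkinter as tk

    root = tk.Tk()
    v = tk.IntVar()

    def on_change(*_):
        print("value is now", v.get())

    v.trace_add("write", on_change)  # modern form of v.trace("w", on_change)
    v.set(1)                         # fires on_change immediately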
@@ -650,7 +1091,7 @@ def show_gui():

     # Create button, it will change label text
     tk.Button(root , text = "Launch", font = ("Impact", 18), bg='#54FA9B', command = guilaunch ).grid(row=6,column=0)
-    tk.Label(root, text = "(Please use the Command Line for more advanced options)",
+    tk.Label(root, text = "(Please use the Command Line for more advanced options)\nThis GUI is deprecated. Please install customtkinter.",
             font = ("Arial", 9)).grid(row=7,column=0)

     root.mainloop()
@@ -680,7 +1121,7 @@ def show_gui():
     if selrunchoice==runopts[3]:
         args.useclblast = [0,1]
     if selrunchoice==runopts[4]:
-        args.usecublas = True
+        args.usecublas = ["normal"]
     if selrunchoice==runopts[5]:
         args.noblas = True
     if selrunchoice==runopts[6]:
@@ -733,12 +1174,20 @@ def main(args):
     if not args.model_param:
         #give them a chance to pick a file
         print("For command line arguments, please refer to --help")
-        print("Otherwise, please manually select ggml file:")
+        print("***")
         try:
-            show_gui()
+            show_new_gui()
         except Exception as ex:
-            print("File selection GUI unsupported. Please check command line: script.py --help")
-            print("Reason for no GUI: " + str(ex))
-            time.sleep(2)
-            sys.exit(2)
+            print("Failed to use new GUI. Reason: " + str(ex))
+            print("Make sure customtkinter is installed!!!")
+            print("Attempting to use old GUI...")
+            if not args.model_param:
+                try:
+                    show_gui_warning()
+                    show_old_gui()
+                except Exception as ex2:
+                    print("File selection GUI unsupported. Please check command line: script.py --help")
+                    print("Reason for no GUI: " + str(ex2))
+                    time.sleep(2)
+                    sys.exit(2)
@@ -810,7 +1259,8 @@ def main(args):
     args.blasthreads = args.threads

     modelname = os.path.abspath(args.model_param)
-    print(f"Loading model: {modelname} \n[Threads: {args.threads}, BlasThreads: {args.blasthreads}, SmartContext: {args.smartcontext}]")
+    print(args)
+    print(f"==========\nLoading model: {modelname} \n[Threads: {args.threads}, BlasThreads: {args.blasthreads}, SmartContext: {args.smartcontext}]")
     loadok = load_model(modelname)
     print("Load Model OK: " + str(loadok))
@@ -845,7 +1295,7 @@ def main(args):
     asyncio.run(RunServerMultiThreaded(args.host, args.port, embedded_kailite))

 if __name__ == '__main__':
-    print("Welcome to KoboldCpp - Version " + KcppVersion) # just update version manually
+    print("***\nWelcome to KoboldCpp - Version " + KcppVersion) # just update version manually
     # print("Python version: " + sys.version)
     parser = argparse.ArgumentParser(description='KoboldCpp Server')
     modelgroup = parser.add_mutually_exclusive_group() #we want to be backwards compatible with the unnamed positional args
@@ -869,7 +1319,8 @@ if __name__ == '__main__':
     parser.add_argument("--blasbatchsize", help="Sets the batch size used in BLAS processing (default 512). Setting it to -1 disables BLAS mode, but keeps other benefits like GPU offload.", type=int,choices=[-1,32,64,128,256,512,1024], default=512)
     parser.add_argument("--stream", help="Uses streaming when generating tokens. Only for the Kobold Lite UI.", action='store_true')
     parser.add_argument("--smartcontext", help="Reserving a portion of context to try processing less frequently.", action='store_true')
-    parser.add_argument("--unbantokens", help="Normally, KoboldAI prevents certain tokens such as EOS and Square Brackets. This flag unbans them.", action='store_true')
+    parser.add_argument("--unbantokens", help="Normally, KoboldAI prevents the EOS token from being generated. This flag unbans it.", action='store_true')
+    parser.add_argument("--bantokens", help="You can manually specify a list of token SUBSTRINGS that the AI cannot use. This bans ALL instances of that substring.", metavar=('[token_substrings]'), nargs='+')
     parser.add_argument("--usemirostat", help="Experimental! Replaces your samplers with mirostat. Takes 3 params = [type(0/1/2), tau(5.0), eta(0.1)].",metavar=('[type]', '[tau]', '[eta]'), type=float, nargs=3)
     parser.add_argument("--forceversion", help="If the model file format detection fails (e.g. rogue modified model) you can set this to override the detected format (enter desired version, e.g. 401 for GPTNeoX-Type2).",metavar=('[version]'), type=int, default=0)
     parser.add_argument("--nommap", help="If set, do not use mmap to load newer models", action='store_true')
@@ -881,7 +1332,7 @@ if __name__ == '__main__':
     compatgroup = parser.add_mutually_exclusive_group()
     compatgroup.add_argument("--noblas", help="Do not use OpenBLAS for accelerated prompt ingestion", action='store_true')
     compatgroup.add_argument("--useclblast", help="Use CLBlast for GPU Acceleration. Must specify exactly 2 arguments, platform ID and device ID (e.g. --useclblast 1 0).", type=int, choices=range(0,9), nargs=2)
-    compatgroup.add_argument("--usecublas", help="Use CuBLAS for GPU Acceleration. Requires Nvidia GPU. Select lowvram to not allocate VRAM scratch buffer.", default='', const='normal', nargs='?', choices=['normal', 'lowvram'])
+    compatgroup.add_argument("--usecublas", help="Use CuBLAS for GPU Acceleration. Requires Nvidia GPU. Select lowvram to not allocate VRAM scratch buffer. Enter a number after to select a different main GPU.", nargs='*',metavar=('[lowvram|normal] [main GPU ID]'), choices=['normal', 'lowvram', '0', '1', '2'])
     parser.add_argument("--gpulayers", help="Set number of layers to offload to GPU when using GPU. Requires GPU.",metavar=('[GPU layers]'), type=int, default=0)
     args = parser.parse_args()
     main(args)
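With nargs='*' the flag now accepts zero, one, or two values, matching the GPU-selection logic in load_model above. An example invocation per the help text (model filename illustrative):

    python koboldcpp.py model.bin --usecublas lowvram 1 --gpulayers 32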
87  llama.cpp
@@ -195,8 +195,8 @@ struct llama_layer {
 };

 struct llama_kv_cache {
-    struct ggml_tensor * k;
-    struct ggml_tensor * v;
+    struct ggml_tensor * k = NULL;
+    struct ggml_tensor * v = NULL;

     struct ggml_context * ctx = NULL;
@@ -482,9 +482,7 @@ struct llama_file_loader {
             std::string word = file.read_string(len);

             float score = 0.0f;
-            if (file_version >= LLAMA_FILE_VERSION_GGMF_V1) {
             file.read_raw(&score, sizeof(score));
-            }

             vocab.token_to_id[word] = i;
@@ -1160,6 +1158,7 @@ static void llama_model_load_internal(
         }
     }
 #endif // GGML_USE_CUBLAS

 #if defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
     const int n_gpu = std::min(n_gpu_layers, int(hparams.n_layer));
@@ -1168,6 +1167,10 @@ static void llama_model_load_internal(
         fprintf(stderr, "%s: offloading non-repeating layers to GPU\n", __func__);
     }
     size_t vram_kv_cache = 0;
+
+#ifdef GGML_USE_CUBLAS
+    const int max_backend_supported_layers = hparams.n_layer + 3;
+    const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
     if (n_gpu_layers > (int) hparams.n_layer + 1) {
         if (low_vram) {
             fprintf(stderr, "%s: cannot offload v cache to GPU due to low VRAM option\n", __func__);
@@ -1184,14 +1187,18 @@ static void llama_model_load_internal(
             vram_kv_cache += MEM_REQ_KV_SELF().at(model.type) / 2;
         }
     }
-    const int max_offloadable_layers = low_vram ? hparams.n_layer + 1 : hparams.n_layer + 3;
+#elif defined(GGML_USE_CLBLAST)
+    const int max_backend_supported_layers = hparams.n_layer + 1;
+    const int max_offloadable_layers = hparams.n_layer + 1;
+#endif // GGML_USE_CUBLAS
+
     fprintf(stderr, "%s: offloaded %d/%d layers to GPU\n",
-            __func__, std::min(n_gpu_layers, max_offloadable_layers), hparams.n_layer + 3);
+            __func__, std::min(n_gpu_layers, max_offloadable_layers), max_backend_supported_layers);
     fprintf(stderr, "%s: total VRAM used: %zu MB\n",
             __func__, (vram_weights + vram_scratch + vram_kv_cache + MB - 1) / MB); // round up
 #else
     (void) n_gpu_layers;
-#endif
+#endif // defined(GGML_USE_CUBLAS) || defined(GGML_USE_CLBLAST)
     }

     // populate `tensors_by_name`
@@ -1898,10 +1905,10 @@ void llama_sample_top_p(struct llama_context * ctx, llama_token_data_array * can
         return;
     }

-    const int64_t t_start_sample_us = ggml_time_us();
-
     llama_sample_softmax(ctx, candidates);

+    const int64_t t_start_sample_us = ggml_time_us();
+
     // Compute the cumulative probabilities
     float cum_sum = 0.0f;
     size_t last_idx = candidates->size;
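This reordering (repeated in the next two hunks) stops the sampler timer from also counting llama_sample_softmax, which records its own elapsed time, so the span would otherwise be billed twice. The pitfall in miniature, as a Python sketch:

    import time

    def softmax_step():
        time.sleep(0.01)   # stands in for llama_sample_softmax's own timed work

    softmax_step()                    # run the inner, separately-timed step first
    t_start = time.perf_counter()     # ...then start this function's own timer
    # ...top-p specific work would go here...
    elapsed = time.perf_counter() - t_start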
@@ -1930,9 +1937,8 @@ void llama_sample_tail_free(struct llama_context * ctx, llama_token_data_array *
         return;
     }

-    const int64_t t_start_sample_us = ggml_time_us();
-
     llama_sample_softmax(nullptr, candidates);
+    const int64_t t_start_sample_us = ggml_time_us();

     // Compute the first and second derivatives
     std::vector<float> first_derivatives(candidates->size - 1);
@@ -1984,11 +1990,11 @@ void llama_sample_typical(struct llama_context * ctx, llama_token_data_array * c
         return;
     }

-    const int64_t t_start_sample_us = ggml_time_us();
-
     // Compute the softmax of logits and calculate entropy
     llama_sample_softmax(nullptr, candidates);

+    const int64_t t_start_sample_us = ggml_time_us();
+
     float entropy = 0.0f;
     for (size_t i = 0; i < candidates->size; ++i) {
         entropy += -candidates->data[i].p * logf(candidates->data[i].p);
@@ -2157,13 +2163,11 @@ llama_token llama_sample_token_mirostat(struct llama_context * ctx, llama_token_

     if (ctx) {
         ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
-        ctx->n_sample++;
     }
     return X;
 }

 llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_token_data_array * candidates, float tau, float eta, float * mu) {
-    assert(ctx);
     int64_t t_start_sample_us;
     t_start_sample_us = ggml_time_us();
@@ -2178,13 +2182,14 @@ llama_token llama_sample_token_mirostat_v2(struct llama_context * ctx, llama_tok
         candidates->size = 1;
     }

+    if (ctx) {
+        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
+    }
+
     // Normalize the probabilities of the remaining words
     llama_sample_softmax(ctx, candidates);

     // Sample the next word X from the remaining words
-    if (ctx) {
-        ctx->t_sample_us += ggml_time_us() - t_start_sample_us;
-    }
     llama_token X = llama_sample_token(ctx, candidates);
     t_start_sample_us = ggml_time_us();
@@ -2252,10 +2257,10 @@ static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llam
     }
     float * f32_output = (float *) output.addr;

-    quantize_fns_t qtype;
+    ggml_type_traits_t qtype;
     if (ggml_is_quantized(tensor.type)) {
-        qtype = ggml_internal_get_quantize_fn(tensor.type);
-        if (qtype.dequantize_row_q == NULL) {
+        qtype = ggml_internal_get_type_traits(tensor.type);
+        if (qtype.to_float == NULL) {
             throw std::runtime_error(format("type %s unsupported for integer quantization: no dequantization available", ggml_type_name(tensor.type)));
         }
     } else if (tensor.type != GGML_TYPE_F16) {
@@ -2266,7 +2271,7 @@ static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llam
     if (tensor.type == GGML_TYPE_F16) {
         ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor.data, f32_output, nelements);
     } else if (ggml_is_quantized(tensor.type)) {
-        qtype.dequantize_row_q(tensor.data, f32_output, nelements);
+        qtype.to_float(tensor.data, f32_output, nelements);
     } else {
         LLAMA_ASSERT(false); // unreachable
     }
@@ -2291,7 +2296,7 @@ static void llama_convert_tensor_internal(const llama_load_tensor & tensor, llam
         if (typ == GGML_TYPE_F16) {
             ggml_fp16_to_fp32_row((ggml_fp16_t *)inbuf, outbuf, nels);
         } else {
-            qtype.dequantize_row_q(inbuf, outbuf, nels);
+            qtype.to_float(inbuf, outbuf, nels);
         }
     };
     workers.push_back(std::thread(compute, tensor.type, tensor.data + in_buff_offs, f32_output + out_buff_offs, thr_elems));
@@ -2406,9 +2411,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             int ny = tensor.ne.at(1);
             if (nx % QK_K != 0 || ny % QK_K != 0) {
                 fprintf(stderr, "\n\n========================= Tensor sizes %d x %d are not divisible by %d\n",nx,ny,QK_K);
-                fprintf(stderr, "This is required to be able to use k-quants for now!\n");
+                fprintf(stderr, "Verify before using\n");
                 fprintf(stderr, "========================================================================================\n\n");
-                throw std::runtime_error("Unsupported tensor size encountered\n");
+                // throw std::runtime_error("Unsupported tensor size encountered\n");
             }
         }
         if (tensor.name == "output.weight") {
@@ -3476,23 +3481,35 @@ llama_token llama_token_nl() {
     return 13;
 }

+struct llama_timings llama_get_timings(struct llama_context * ctx) {
+    struct llama_timings result = {
+        /*.t_start_ms  =*/ 1e-3 * ctx->t_start_us,
+        /*.t_end_ms    =*/ 1.00 * ggml_time_ms(),
+        /*.t_load_ms   =*/ 1e-3 * ctx->t_load_us,
+        /*.t_sample_ms =*/ 1e-3 * ctx->t_sample_us,
+        /*.t_p_eval_ms =*/ 1e-3 * ctx->t_p_eval_us,
+        /*.t_eval_ms   =*/ 1e-3 * ctx->t_eval_us,
+
+        /*.n_sample =*/ std::max(1, ctx->n_sample),
+        /*.n_p_eval =*/ std::max(1, ctx->n_p_eval),
+        /*.n_eval   =*/ std::max(1, ctx->n_eval),
+    };
+
+    return result;
+}
+
 void llama_print_timings(struct llama_context * ctx) {
-    const int64_t t_end_us = ggml_time_us();
+    const llama_timings timings = llama_get_timings(ctx);

-    const int32_t n_sample = std::max(1, ctx->n_sample);
-    const int32_t n_eval = std::max(1, ctx->n_eval);
-    const int32_t n_p_eval = std::max(1, ctx->n_p_eval);
-
     fprintf(stderr, "\n");
-    fprintf(stderr, "%s:        load time = %8.2f ms\n", __func__, ctx->t_load_us / 1000.0);
+    fprintf(stderr, "%s:        load time = %8.2f ms\n", __func__, timings.t_load_ms);
     fprintf(stderr, "%s:      sample time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
-            __func__, 1e-3 * ctx->t_sample_us, n_sample, 1e-3 * ctx->t_sample_us / n_sample, 1e6 / ctx->t_sample_us * n_sample);
+            __func__, timings.t_sample_ms, timings.n_sample, timings.t_sample_ms / timings.n_sample, 1e3 / timings.t_sample_ms * timings.n_sample);
     fprintf(stderr, "%s: prompt eval time = %8.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n",
-            __func__, 1e-3 * ctx->t_p_eval_us, n_p_eval, 1e-3 * ctx->t_p_eval_us / n_p_eval, 1e6 / ctx->t_p_eval_us * n_p_eval);
+            __func__, timings.t_p_eval_ms, timings.n_p_eval, timings.t_p_eval_ms / timings.n_p_eval, 1e3 / timings.t_p_eval_ms * timings.n_p_eval);
     fprintf(stderr, "%s:        eval time = %8.2f ms / %5d runs   (%8.2f ms per token, %8.2f tokens per second)\n",
-            __func__, 1e-3 * ctx->t_eval_us, n_eval, 1e-3 * ctx->t_eval_us / n_eval, 1e6 / ctx->t_eval_us * n_eval);
+            __func__, timings.t_eval_ms, timings.n_eval, timings.t_eval_ms / timings.n_eval, 1e3 / timings.t_eval_ms * timings.n_eval);
-    fprintf(stderr, "%s:       total time = %8.2f ms\n", __func__, (t_end_us - ctx->t_start_us)/1000.0);
+    fprintf(stderr, "%s:       total time = %8.2f ms\n", __func__, (timings.t_end_ms - timings.t_start_ms));
 }

 void llama_reset_timings(struct llama_context * ctx) {
15  llama.h
@@ -134,6 +134,20 @@ extern "C" {
         bool quantize_output_tensor; // quantize output.weight
     } llama_model_quantize_params;

+    // performance timing information
+    struct llama_timings {
+        double t_start_ms;
+        double t_end_ms;
+        double t_load_ms;
+        double t_sample_ms;
+        double t_p_eval_ms;
+        double t_eval_ms;
+
+        int32_t n_sample;
+        int32_t n_p_eval;
+        int32_t n_eval;
+    };
+
     LLAMA_API struct llama_context_params llama_context_default_params();
     LLAMA_API struct llama_model_quantize_params llama_model_quantize_default_params();
@@ -331,6 +345,7 @@ extern "C" {
     LLAMA_API llama_token llama_sample_token(struct llama_context * ctx, llama_token_data_array * candidates);

     // Performance information
+    LLAMA_API struct llama_timings llama_get_timings(struct llama_context * ctx);
     LLAMA_API void llama_print_timings(struct llama_context * ctx);
     LLAMA_API void llama_reset_timings(struct llama_context * ctx);
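Since this project drives its C libraries through ctypes, the new struct can be mirrored field for field on the Python side. A sketch assuming a built shared object that exports llama_get_timings (library name and loading path are assumptions; the context handle is opaque):

    import ctypes

    class llama_timings(ctypes.Structure):
        _fields_ = [("t_start_ms", ctypes.c_double),
                    ("t_end_ms", ctypes.c_double),
                    ("t_load_ms", ctypes.c_double),
                    ("t_sample_ms", ctypes.c_double),
                    ("t_p_eval_ms", ctypes.c_double),
                    ("t_eval_ms", ctypes.c_double),
                    ("n_sample", ctypes.c_int32),
                    ("n_p_eval", ctypes.c_int32),
                    ("n_eval", ctypes.c_int32)]

    lib = ctypes.CDLL("./libllama.so")                 # assumed library name
    lib.llama_get_timings.restype = llama_timings
    lib.llama_get_timings.argtypes = [ctypes.c_void_p]
    # timings = lib.llama_get_timings(ctx)             # ctx from llama_init elsewhere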
@@ -1,4 +1,4 @@
 echo This file is only for my own usage, please do not use it. I am lazy.

 set PATH=d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python;d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python\\Scripts;%PATH%
-PyInstaller --noconfirm --onefile --clean --console --icon "./niko.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
+PyInstaller --noconfirm --onefile --clean --console --collect-all customtkinter --icon "./niko.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
@@ -1,4 +1,4 @@
 echo This file is only for my own usage, please do not use it. I am lazy.

 set PATH=d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python;d:\\MainApplications\\KoboldAIGPT\\KoboldAI-Horde-Bridge\\python\\Scripts;%PATH%
-PyInstaller --noconfirm --onefile --clean --console --icon "./nikogreen.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./koboldcpp_cublas.dll;." --add-data "./cublas64_11.dll;." --add-data "./cublasLt64_11.dll;." --add-data "./cudart64_110.dll;." --add-data "./msvcp140.dll;." --add-data "./vcruntime140.dll;." --add-data "./vcruntime140_1.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
+PyInstaller --noconfirm --onefile --clean --console --collect-all customtkinter --icon "./nikogreen.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./koboldcpp_cublas.dll;." --add-data "./cublas64_11.dll;." --add-data "./cublasLt64_11.dll;." --add-data "./cudart64_110.dll;." --add-data "./msvcp140.dll;." --add-data "./vcruntime140.dll;." --add-data "./vcruntime140_1.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
@@ -1 +1 @@
-PyInstaller --noconfirm --onefile --clean --console --icon "./niko.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"
+PyInstaller --noconfirm --onefile --clean --console --collect-all customtkinter --icon "./niko.ico" --add-data "./klite.embd;." --add-data "./koboldcpp.dll;." --add-data "./koboldcpp_openblas.dll;." --add-data "./koboldcpp_failsafe.dll;." --add-data "./koboldcpp_openblas_noavx2.dll;." --add-data "./libopenblas.dll;." --add-data "./koboldcpp_clblast.dll;." --add-data "./clblast.dll;." --add-data "./rwkv_vocab.embd;." --add-data "./rwkv_world_vocab.embd;." "./koboldcpp.py" -n "koboldcpp.exe"

@@ -1,6 +1,6 @@
 #!/bin/bash

-pyinstaller --noconfirm --onefile --clean --console --icon "./niko.ico" \
+pyinstaller --noconfirm --onefile --clean --console --collect-all customtkinter --icon "./niko.ico" \
 --add-data "./klite.embd:." \
 --add-data "./koboldcpp.so:." \
 --add-data "./koboldcpp_openblas.so:." \

@@ -8,4 +8,5 @@ pyinstaller --noconfirm --onefile --clean --console --icon "./niko.ico" \
 --add-data "./koboldcpp_openblas_noavx2.so:." \
 --add-data "./koboldcpp_clblast.so:." \
 --add-data "./rwkv_vocab.embd:." \
+--add-data "./rwkv_world_vocab.embd:." \
 "./koboldcpp.py" -n "koboldcpp"

@@ -472,6 +472,9 @@ static const size_t CACHE_LINE_SIZE_F32 = CACHE_LINE_SIZE/sizeof(float);
 //
 // quantization
 //

+#ifndef MM256_SET_M128I
+#define MM256_SET_M128I(a, b) _mm256_insertf128_si256(_mm256_castsi128_si256(b), (a), 1)
+#endif

 #if defined(__AVX__) || defined(__AVX2__) || defined(__AVX512F__) || defined(__SSSE3__)
 // multiply int8_t, add results pairwise twice

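Context for the macro above (my reading, not stated in the patch): the `_mm256_set_m128i` intrinsic is missing from some older GCC releases, so the patch defines `MM256_SET_M128I` in terms of the universally available cast-and-insert intrinsics and substitutes it at every former `_mm256_set_m128i` call site in the hunks below. A standalone sketch of the equivalence (compile with -mavx):

// Sketch: MM256_SET_M128I(hi, lo) builds a 256-bit vector from two 128-bit halves.
#include <immintrin.h>
#include <cstdio>

int main() {
    const __m128i lo = _mm_set1_epi32(1); // destined for lanes 0..3
    const __m128i hi = _mm_set1_epi32(2); // destined for lanes 4..7
    // the cast places `lo` in the low 128 bits; insertf128 writes `hi` into the high half
    const __m256i v = _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
    alignas(32) int out[8];
    _mm256_store_si256((__m256i *)out, v);
    printf("%d %d\n", out[0], out[7]); // prints: 1 2
    return 0;
}
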
@@ -532,7 +535,7 @@ static inline __m256i bytes_from_bits_32(const uint8_t * x) {
 static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
 {
     const __m128i tmp = _mm_loadu_si128((const __m128i *)rsi);
-    const __m256i bytes = _mm256_set_m128i(_mm_srli_epi16(tmp, 4), tmp);
+    const __m256i bytes = MM256_SET_M128I(_mm_srli_epi16(tmp, 4), tmp);
     const __m256i lowMask = _mm256_set1_epi8( 0xF );
     return _mm256_and_si256(lowMask, bytes);
 }

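For reference, a scalar sketch (mine, not from the patch) of what bytes_from_nibbles_32 computes: 16 packed bytes become 32 nibble values, low nibbles in lanes 0..15 and high nibbles in lanes 16..31:

#include <stdint.h>

// Scalar equivalent of the SIMD nibble unpack above.
static void bytes_from_nibbles_32_ref(const uint8_t in[16], uint8_t out[32]) {
    for (int i = 0; i < 16; ++i) {
        out[i]      = in[i] & 0x0F; // low nibble  -> lane i
        out[i + 16] = in[i] >> 4;   // high nibble -> lane i + 16
    }
}
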
@@ -605,7 +608,7 @@ static inline __m256i bytes_from_bits_32(const uint8_t * x) {
     bytesh = _mm_or_si128(bytesh, bit_mask);
     bytesl = _mm_cmpeq_epi8(bytesl, _mm_set1_epi64x(-1));
     bytesh = _mm_cmpeq_epi8(bytesh, _mm_set1_epi64x(-1));
-    return _mm256_set_m128i(bytesh, bytesl);
+    return MM256_SET_M128I(bytesh, bytesl);
 }

 // Unpack 32 4-bit fields into 32 bytes

@@ -618,7 +621,7 @@ static inline __m256i bytes_from_nibbles_32(const uint8_t * rsi)
     const __m128i lowMask = _mm_set1_epi8(0xF);
     tmpl = _mm_and_si128(lowMask, tmpl);
     tmph = _mm_and_si128(lowMask, tmph);
-    return _mm256_set_m128i(tmph, tmpl);
+    return MM256_SET_M128I(tmph, tmpl);
 }

 // add int16_t pairwise and return as float vector

@@ -626,7 +629,7 @@ static inline __m256 sum_i16_pairs_float(const __m128i xh, const __m128i xl) {
     const __m128i ones = _mm_set1_epi16(1);
     const __m128i summed_pairsl = _mm_madd_epi16(ones, xl);
     const __m128i summed_pairsh = _mm_madd_epi16(ones, xh);
-    const __m256i summed_pairs = _mm256_set_m128i(summed_pairsh, summed_pairsl);
+    const __m256i summed_pairs = MM256_SET_M128I(summed_pairsh, summed_pairsl);
     return _mm256_cvtepi32_ps(summed_pairs);
 }

@@ -2246,7 +2249,7 @@ static void ggml_v2_vec_dot_q4_0_q8_0(const int n, float * restrict s, const voi
         const __m128i i32_1 = mul_sum_i8_pairs(bx, by);

         // Convert int32_t to float
-        __m256 p = _mm256_cvtepi32_ps(_mm256_set_m128i(i32_0, i32_1));
+        __m256 p = _mm256_cvtepi32_ps(MM256_SET_M128I(i32_0, i32_1));

         // Apply the scale, and accumulate
         acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);

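The hunks above and below only swap the intrinsic for the macro; the dot product itself is unchanged. As a reminder of what these kernels compute, a scalar sketch (mine; the flattened block layout is assumed from ggml's q4_0/q8_0 formats, 32 weights per block with one scale each):

#include <stdint.h>

// Scalar sketch of a q4_0 x q8_0 dot product:
// result += d_x * d_y * sum_i (nibble_i - 8) * int8_i, per 32-element block.
static float vec_dot_q4_0_q8_0_ref(int nblocks,
                                   const float * dx, const uint8_t * xqs,  // q4_0: scale + 16 packed bytes/block
                                   const float * dy, const int8_t * yqs) { // q8_0: scale + 32 int8 quants/block
    float sum = 0.0f;
    for (int b = 0; b < nblocks; ++b) {
        int isum = 0;
        for (int i = 0; i < 16; ++i) {
            const int x0 = (xqs[b*16 + i] & 0x0F) - 8; // low nibble, lane i
            const int x1 = (xqs[b*16 + i] >>   4) - 8; // high nibble, lane i + 16
            isum += x0 * yqs[b*32 + i] + x1 * yqs[b*32 + i + 16];
        }
        sum += dx[b] * dy[b] * (float) isum;
    }
    return sum;
}
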
@@ -2727,7 +2730,7 @@ static void ggml_v2_vec_dot_q5_0_q8_0(const int n, float * restrict s, const voi
         __m128i bxh = _mm256_extractf128_si256(bx, 1);
         bxl = _mm_or_si128(bxl, bxhil);
         bxh = _mm_or_si128(bxh, bxhih);
-        bx = _mm256_set_m128i(bxh, bxl);
+        bx = MM256_SET_M128I(bxh, bxl);

         const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

@@ -2989,7 +2992,7 @@ static void ggml_v2_vec_dot_q5_1_q8_1(const int n, float * restrict s, const voi
         __m128i bxh = _mm256_extractf128_si256(bx, 1);
         bxl = _mm_or_si128(bxl, bxhil);
         bxh = _mm_or_si128(bxh, bxhih);
-        bx = _mm256_set_m128i(bxh, bxl);
+        bx = MM256_SET_M128I(bxh, bxl);

         const __m256 dy = _mm256_broadcast_ss(&y[i].d);
         const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

@@ -17417,7 +17420,7 @@ static void ggml_v2_vec_dot_q4_0_q8_0_v2(const int n, float * restrict s, const
         }

         // Convert int32_t to float
-        __m256 p = _mm256_cvtepi32_ps( _mm256_set_m128i( i32[0], i32[1] ));
+        __m256 p = _mm256_cvtepi32_ps( MM256_SET_M128I( i32[0], i32[1] ));
         // Apply the scale, and accumulate
         acc = _mm256_add_ps(_mm256_mul_ps( d, p ), acc);
     }

@@ -17686,7 +17689,7 @@ static void ggml_v2_vec_dot_q4_2_q8_0_v2(const int n, float * restrict s, const

         __m128i bx0 = bytes_from_nibbles_16(x[2*i + 0].qs);
         __m128i bx1 = bytes_from_nibbles_16(x[2*i + 1].qs);
-        __m256i bx = _mm256_set_m128i(bx1, bx0);
+        __m256i bx = MM256_SET_M128I(bx1, bx0);

         // Now we have a vector with bytes in [ 0 .. 15 ] interval. Offset them into [ -8 .. +7 ] interval.
         const __m256i off = _mm256_set1_epi8(8);

@@ -17819,7 +17822,7 @@ static void ggml_v2_vec_dot_q4_3_q8_1_v2(const int n, float * restrict s, const

         const __m128i bx0 = bytes_from_nibbles_16(x[2*i + 0].qs);
         const __m128i bx1 = bytes_from_nibbles_16(x[2*i + 1].qs);
-        const __m256i bx = _mm256_set_m128i(bx1, bx0);
+        const __m256i bx = MM256_SET_M128I(bx1, bx0);

         const __m256 dy = _mm256_broadcast_ss(&y[i].d);
         const __m256i by = _mm256_loadu_si256((const __m256i *)y[i].qs);

@@ -348,7 +348,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
     const auto & hparams = model.hparams;
     size_t vram_total = 0;
     const int n_gpu = std::min(gpulayers, int(hparams.n_layer));
-    fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
+    fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
     for (int i = 0; i < n_gpu; ++i) {
         const auto & layer = model.layers[i];
         layer.c_attn_q_proj_w->backend = GGML_BACKEND_GPU;

@@ -373,7 +373,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g
         ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
 #endif
     }
-    fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+    fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
 }
 #endif

@@ -301,7 +301,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
     const auto & hparams = model.hparams;
     size_t vram_total = 0;
     const int n_gpu = std::min(gpulayers, int(hparams.n_layers));
-    fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
+    fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
     for (int i = 0; i < n_gpu; ++i) {
         const auto & layer = model.layers[i];
         layer.ffn_up_proj->backend = GGML_BACKEND_GPU;

@@ -320,7 +320,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo
         ggml_cuda_transform_tensor(layer.c_attn_out_proj_weight->data,layer.c_attn_out_proj_weight); vram_total += ggml_nbytes(layer.c_attn_out_proj_weight);
 #endif
     }
-    fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+    fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
 }
 #endif

@@ -335,7 +335,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
     const auto & hparams = model.hparams;
     size_t vram_total = 0;
     const int n_gpu = std::min(gpulayers, int(hparams.n_layer));
-    fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
+    fprintf(stderr, "%s: [GPU] offloading %d layers to GPU\n", __func__, n_gpu);
     for (int i = 0; i < n_gpu; ++i) {
         const auto & layer = model.layers[i];
         layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;

@@ -354,7 +354,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &
         ggml_cuda_transform_tensor(layer.c_mlp_proj_w->data,layer.c_mlp_proj_w); vram_total += ggml_nbytes(layer.c_mlp_proj_w);
 #endif
     }
-    fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
+    fprintf(stderr, "%s: [GPU] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
 }
 #endif

@@ -6,6 +6,13 @@
 #include "rwkv_v3.h"
 #include "ggml.h"

+#ifdef GGML_USE_CUBLAS
+#include "ggml-cuda.h"
+#endif
+#if defined(GGML_USE_CLBLAST)
+#include "ggml-opencl.h"
+#endif
+
 #include <string>
 #include <vector>
 #include <cstring>

@@ -1058,7 +1065,11 @@ struct rwkv_future_tensor rwkv_future_graph_work(struct rwkv_future_ctx & ctx,
     const size_t n_threads,
     const size_t sequence_len = 1
 ) {
+#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+    enum ggml_type mul_mat_type = type == GGML_TYPE_F32 ? GGML_TYPE_F32 : GGML_TYPE_F16;
+#else
     enum ggml_type mul_mat_type = ggml_is_quantized(type) ? GGML_TYPE_Q8_1 : type;
+#endif
     return ctx.alloc(GGML_TYPE_I8, rwkv_future_tensor::size(mul_mat_type, ffn_key_height, sequence_len) * n_threads + 64 * (n_threads - 1));
 }

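The sizing expression above reserves one per-thread scratch region for the converted mul_mat_type data, separated by 64-byte pads so threads do not share cache lines. A worked sketch of the same arithmetic (my illustration; `region_bytes` stands in for `rwkv_future_tensor::size(...)`):

// Hypothetical illustration of the scratch-buffer formula used above.
static size_t work_buffer_bytes(size_t region_bytes, size_t n_threads) {
    // one region per thread, with a 64-byte gap between consecutive regions
    return region_bytes * n_threads + 64 * (n_threads - 1);
}
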
@@ -1545,7 +1556,38 @@ struct rwkv_context * rwkv_clone_context(struct rwkv_context * ctx, const uint32
 }

 bool rwkv_gpu_offload_layers(struct rwkv_context * ctx, const uint32_t n_layers) {
+#if defined(GGML_USE_CLBLAST) || defined(GGML_USE_CUBLAS)
+    printf("\nOffloading %u (or fewer) layers...",n_layers);
+    const auto offload = [&](struct ggml_tensor * tensor) {
+        // TODO support multi-GPU
+        tensor->backend = GGML_BACKEND_GPU;
+#if defined(GGML_USE_CLBLAST)
+        ggml_cl_transform_tensor(tensor->data, tensor);
+#else
+        ggml_cuda_transform_tensor(tensor->data, tensor);
+#endif
+    };
+
+    const size_t n_gpu = std::min(n_layers, ctx->instance->model.header.n_layer);
+
+    if (ctx->gpu_layers < n_gpu) {
+        for (size_t & i = ctx->gpu_layers; i < n_gpu; i++) {
+            const struct rwkv_layer & layer = ctx->instance->model.layers[i];
+
+            // TODO also offload other operations to GPU with ggml_cuda_assign_buffers
+            offload(layer.att_key);
+            offload(layer.att_value);
+            offload(layer.att_receptance);
+            offload(layer.att_output);
+
+            offload(layer.ffn_key);
+            offload(layer.ffn_value);
+            offload(layer.ffn_receptance);
+        }
+
+        return true;
+    }
+#endif
     return false;
 }

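A caller-side sketch for the offload hook above (hypothetical; rwkv_init_from_file and the layer count are placeholders following the rwkv.cpp-style API this file derives from):

// Offload up to 32 layers after loading; returns false in CPU-only builds
// (the clamp to the model's actual layer count happens inside the function).
struct rwkv_context * ctx = rwkv_init_from_file("model.bin", 8 /* n_threads */);
if (ctx != NULL && !rwkv_gpu_offload_layers(ctx, 32)) {
    fprintf(stderr, "no layers offloaded (CPU-only build or layers already on GPU)\n");
}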